diff --git a/cache/remotecache/s3/s3.go b/cache/remotecache/s3/s3.go index 201c51cf7b34..e5ef04cf75ec 100644 --- a/cache/remotecache/s3/s3.go +++ b/cache/remotecache/s3/s3.go @@ -6,19 +6,17 @@ import ( "encoding/json" "fmt" "io" + "net/http" + "net/url" "os" "strconv" "strings" "time" - "github.com/aws/aws-sdk-go-v2/aws" - aws_config "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/feature/s3/manager" - "github.com/aws/aws-sdk-go-v2/service/s3" - s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/containerd/containerd/v2/core/content" "github.com/containerd/containerd/v2/pkg/labels" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" "github.com/moby/buildkit/cache/remotecache" v1 "github.com/moby/buildkit/cache/remotecache/v1" cacheimporttypes "github.com/moby/buildkit/cache/remotecache/v1/types" @@ -77,40 +75,33 @@ func getConfig(attrs map[string]string) (Config, error) { region, ok := attrs[attrRegion] if !ok { - region, ok = os.LookupEnv("AWS_REGION") - if !ok { - return Config{}, errors.Errorf("region ($AWS_REGION) not set for s3 cache") - } + region, _ = os.LookupEnv("AWS_REGION") // optional for minio; keep semantics } prefix := attrs[attrPrefix] manifestsPrefix, ok := attrs[attrManifestsPrefix] - if !ok { + if !ok || manifestsPrefix == "" { manifestsPrefix = "manifests/" } blobsPrefix, ok := attrs[attrBlobsPrefix] - if !ok { + if !ok || blobsPrefix == "" { blobsPrefix = "blobs/" } names := []string{"buildkit"} - name, ok := attrs[attrName] - if ok { - splittedNames := strings.Split(name, ";") - if len(splittedNames) > 0 { - names = splittedNames + if name, ok := attrs[attrName]; ok && name != "" { + splitted := strings.Split(name, ";") + if len(splitted) > 0 { + names = splitted } } touchRefresh := 24 * time.Hour - - touchRefreshStr, ok := attrs[attrTouchRefresh] - if ok { - touchRefreshFromUser, err := time.ParseDuration(touchRefreshStr) - if err == nil { - touchRefresh = touchRefreshFromUser + if v, ok := attrs[attrTouchRefresh]; ok && v != "" { + if d, err := time.ParseDuration(v); err == nil { + touchRefresh = d } } @@ -120,25 +111,20 @@ func getConfig(attrs map[string]string) (Config, error) { sessionToken := attrs[attrSessionToken] usePathStyle := false - usePathStyleStr, ok := attrs[attrUsePathStyle] - if ok { - usePathStyleUser, err := strconv.ParseBool(usePathStyleStr) + if v, ok := attrs[attrUsePathStyle]; ok && v != "" { + parsed, err := strconv.ParseBool(v) if err == nil { - usePathStyle = usePathStyleUser + usePathStyle = parsed } } uploadParallelism := 4 - uploadParallelismStr, ok := attrs[attrUploadParallelism] - if ok { - uploadParallelismInt, err := strconv.Atoi(uploadParallelismStr) - if err != nil { + if v, ok := attrs[attrUploadParallelism]; ok && v != "" { + iv, err := strconv.Atoi(v) + if err != nil || iv <= 0 { return Config{}, errors.Errorf("upload_parallelism must be a positive integer") } - if uploadParallelismInt <= 0 { - return Config{}, errors.Errorf("upload_parallelism must be a positive integer") - } - uploadParallelism = uploadParallelismInt + uploadParallelism = iv } return Config{ @@ -158,7 +144,6 @@ func getConfig(attrs map[string]string) (Config, error) { }, nil } -// ResolveCacheExporterFunc for s3 cache exporter. func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { config, err := getConfig(attrs) @@ -166,45 +151,40 @@ func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { return nil, err } - s3Client, err := newS3Client(ctx, config) + minioClient, err := newMinioClient(config) if err != nil { return nil, err } - cc := v1.NewCacheChains() - return &exporter{CacheExporterTarget: cc, chains: cc, s3Client: s3Client, config: config}, nil + + cacheChains := v1.NewCacheChains() + return &exporter{CacheExporterTarget: cacheChains, chains: cacheChains, minioClient: minioClient, config: config}, nil } } type exporter struct { solver.CacheExporterTarget - chains *v1.CacheChains - s3Client *s3Client - config Config + chains *v1.CacheChains + minioClient *minioClient + config Config } -func (*exporter) Name() string { - return "exporting cache to Amazon S3" -} +func (*exporter) Name() string { return "exporting cache to S3" } func (e *exporter) Config() remotecache.Config { - return remotecache.Config{ - Compression: compression.New(compression.Default), - } + return remotecache.Config{Compression: compression.New(compression.Default)} } -type nopCloserSectionReader struct { - *io.SectionReader -} +type nopCloserSectionReader struct{ *io.SectionReader } func (*nopCloserSectionReader) Close() error { return nil } func (e *exporter) Finalize(ctx context.Context) (map[string]string, error) { - cacheConfig, descs, err := e.chains.Marshal(ctx) + cacheConfig, descriptors, err := e.chains.Marshal(ctx) if err != nil { return nil, err } - eg, groupCtx := errgroup.WithContext(ctx) + errorGroup, groupContext := errgroup.WithContext(ctx) tasks := make(chan int, e.config.UploadParallelism) go func() { @@ -214,131 +194,127 @@ func (e *exporter) Finalize(ctx context.Context) (map[string]string, error) { close(tasks) }() - for range e.config.UploadParallelism { - eg.Go(func() error { + for workerIndex := 0; workerIndex < e.config.UploadParallelism; workerIndex++ { + errorGroup.Go(func() error { for index := range tasks { blob := cacheConfig.Layers[index].Blob - dgstPair, ok := descs[blob] + descriptorProviderPair, ok := descriptors[blob] if !ok { return errors.Errorf("missing blob %s", blob) } - if dgstPair.Descriptor.Annotations == nil { + if descriptorProviderPair.Descriptor.Annotations == nil { return errors.Errorf("invalid descriptor without annotations") } - v, ok := dgstPair.Descriptor.Annotations[labels.LabelUncompressed] + uncompressedAnnotation, ok := descriptorProviderPair.Descriptor.Annotations[labels.LabelUncompressed] if !ok { return errors.Errorf("invalid descriptor without uncompressed annotation") } - diffID, err := digest.Parse(v) + diffID, err := digest.Parse(uncompressedAnnotation) if err != nil { return errors.Wrapf(err, "failed to parse uncompressed annotation") } - key := e.s3Client.blobKey(dgstPair.Descriptor.Digest) - exists, size, err := e.s3Client.exists(groupCtx, key) + key := e.minioClient.blobKey(descriptorProviderPair.Descriptor.Digest) + lastMod, err := e.minioClient.exists(groupContext, key) if err != nil { return errors.Wrapf(err, "failed to check file presence in cache") } - if exists != nil { - if time.Since(*exists) > e.config.TouchRefresh { - err = e.s3Client.touch(groupCtx, key, size) - if err != nil { + if lastMod != nil { + if time.Since(*lastMod) > e.config.TouchRefresh { + if err := e.minioClient.touch(groupContext, key); err != nil { return errors.Wrapf(err, "failed to touch file") } } } else { - layerDone := progress.OneOff(groupCtx, fmt.Sprintf("writing layer %s", blob)) - // TODO: once buildkit uses v2, start using - // https://github.com/containerd/containerd/pull/9657 - // currently inline data should never happen. - ra, err := dgstPair.Provider.ReaderAt(groupCtx, dgstPair.Descriptor) + layerDone := progress.OneOff(groupContext, fmt.Sprintf("writing layer %s", blob)) + readerAt, err := descriptorProviderPair.Provider.ReaderAt(groupContext, descriptorProviderPair.Descriptor) if err != nil { return layerDone(errors.Wrap(err, "error reading layer blob from provider")) } - defer ra.Close() - if err := e.s3Client.saveMutableAt(groupCtx, key, &nopCloserSectionReader{io.NewSectionReader(ra, 0, ra.Size())}); err != nil { + defer readerAt.Close() + + section := &nopCloserSectionReader{io.NewSectionReader(readerAt, 0, readerAt.Size())} + if err := e.minioClient.saveMutableAt(groupContext, key, section, readerAt.Size()); err != nil { return layerDone(errors.Wrap(err, "error writing layer blob")) } layerDone(nil) } - la := &cacheimporttypes.LayerAnnotations{ + layerAnnotations := &cacheimporttypes.LayerAnnotations{ DiffID: diffID, - Size: dgstPair.Descriptor.Size, - MediaType: dgstPair.Descriptor.MediaType, + Size: descriptorProviderPair.Descriptor.Size, + MediaType: descriptorProviderPair.Descriptor.MediaType, } - if v, ok := dgstPair.Descriptor.Annotations["buildkit/createdat"]; ok { - var t time.Time - if err := (&t).UnmarshalText([]byte(v)); err != nil { + if createdAt, ok := descriptorProviderPair.Descriptor.Annotations["buildkit/createdat"]; ok { + var createdAtTime time.Time + if err := (&createdAtTime).UnmarshalText([]byte(createdAt)); err != nil { return err } - la.CreatedAt = t.UTC() + layerAnnotations.CreatedAt = createdAtTime.UTC() } - cacheConfig.Layers[index].Annotations = la + cacheConfig.Layers[index].Annotations = layerAnnotations } return nil }) } - if err := eg.Wait(); err != nil { + if err := errorGroup.Wait(); err != nil { return nil, err } - dt, err := json.Marshal(cacheConfig) + manifestData, err := json.Marshal(cacheConfig) if err != nil { return nil, err } for _, name := range e.config.Names { - if err := e.s3Client.saveMutableAt(ctx, e.s3Client.manifestKey(name), bytes.NewReader(dt)); err != nil { + if err := e.minioClient.saveMutableAt(ctx, e.minioClient.manifestKey(name), bytes.NewReader(manifestData), int64(len(manifestData))); err != nil { return nil, errors.Wrapf(err, "error writing manifest: %s", name) } } return nil, nil } -// ResolveCacheImporterFunc for s3 cache importer. func ResolveCacheImporterFunc() remotecache.ResolveCacheImporterFunc { return func(ctx context.Context, _ session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) { config, err := getConfig(attrs) if err != nil { return nil, ocispecs.Descriptor{}, err } - s3Client, err := newS3Client(ctx, config) + minioClient, err := newMinioClient(config) if err != nil { return nil, ocispecs.Descriptor{}, err } - return &importer{s3Client, config}, ocispecs.Descriptor{}, nil + return &importer{minioClient: minioClient, config: config}, ocispecs.Descriptor{}, nil } } type importer struct { - s3Client *s3Client - config Config + minioClient *minioClient + config Config } -func (i *importer) makeDescriptorProviderPair(l cacheimporttypes.CacheLayer) (*v1.DescriptorProviderPair, error) { - if l.Annotations == nil { +func (i *importer) makeDescriptorProviderPair(layer cacheimporttypes.CacheLayer) (*v1.DescriptorProviderPair, error) { + if layer.Annotations == nil { return nil, errors.Errorf("cache layer with missing annotations") } - if l.Annotations.DiffID == "" { + if layer.Annotations.DiffID == "" { return nil, errors.Errorf("cache layer with missing diffid") } - annotations := map[string]string{} - annotations[labels.LabelUncompressed] = l.Annotations.DiffID.String() - if !l.Annotations.CreatedAt.IsZero() { - txt, err := l.Annotations.CreatedAt.MarshalText() - if err != nil { + annotations := map[string]string{labels.LabelUncompressed: layer.Annotations.DiffID.String()} + if !layer.Annotations.CreatedAt.IsZero() { + if createdAtText, err := layer.Annotations.CreatedAt.MarshalText(); err == nil { + annotations["buildkit/createdat"] = string(createdAtText) + } else { return nil, err } - annotations["buildkit/createdat"] = string(txt) } return &v1.DescriptorProviderPair{ - Provider: i.s3Client, + Provider: i.minioClient, Descriptor: ocispecs.Descriptor{ - MediaType: l.Annotations.MediaType, - Digest: l.Blob, - Size: l.Annotations.Size, + MediaType: layer.Annotations.MediaType, + Digest: layer.Blob, + Size: layer.Annotations.Size, Annotations: annotations, }, }, nil @@ -346,7 +322,7 @@ func (i *importer) makeDescriptorProviderPair(l cacheimporttypes.CacheLayer) (*v func (i *importer) load(ctx context.Context) (*v1.CacheChains, error) { var config cacheimporttypes.CacheConfig - found, err := i.s3Client.getManifest(ctx, i.s3Client.manifestKey(i.config.Names[0]), &config) + found, err := i.minioClient.getManifest(ctx, i.minioClient.manifestKey(i.config.Names[0]), &config) if err != nil { return nil, err } @@ -355,33 +331,29 @@ func (i *importer) load(ctx context.Context) (*v1.CacheChains, error) { } allLayers := v1.DescriptorProvider{} - - for _, l := range config.Layers { - dpp, err := i.makeDescriptorProviderPair(l) + for _, layer := range config.Layers { + descriptorProviderPair, err := i.makeDescriptorProviderPair(layer) if err != nil { return nil, err } - allLayers[l.Blob] = *dpp + allLayers[layer.Blob] = *descriptorProviderPair } - - cc := v1.NewCacheChains() - if err := v1.ParseConfig(config, allLayers, cc); err != nil { + cacheChains := v1.NewCacheChains() + if err := v1.ParseConfig(config, allLayers, cacheChains); err != nil { return nil, err } - return cc, nil + return cacheChains, nil } func (i *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) { - cc, err := i.load(ctx) + cacheChains, err := i.load(ctx) if err != nil { return nil, err } - - keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w) + keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cacheChains, w) if err != nil { return nil, err } - return solver.NewCacheManager(ctx, id, keysStorage, resultStorage), nil } @@ -390,37 +362,53 @@ type readerAt struct { size int64 } -func (r *readerAt) Size() int64 { - return r.size +func (r *readerAt) ReadAt(p []byte, off int64) (int, error) { + if len(p) == 0 { + return 0, nil + } + if off >= r.size { + return 0, io.EOF + } + return r.ReaderAtCloser.ReadAt(p, off) } -type s3Client struct { - *s3.Client - *manager.Uploader +func (r *readerAt) Size() int64 { return r.size } + +type minioClient struct { + client *minio.Client bucket string prefix string blobsPrefix string manifestsPrefix string } -func newS3Client(ctx context.Context, config Config) (*s3Client, error) { - cfg, err := aws_config.LoadDefaultConfig(ctx, aws_config.WithRegion(config.Region)) +func newMinioClient(config Config) (*minioClient, error) { + if config.EndpointURL == "" { + config.EndpointURL = "https://s3.amazonaws.com" + } + + parsedURL, err := url.Parse(config.EndpointURL) if err != nil { - return nil, errors.Errorf("Unable to load AWS SDK config, %v", err) + return nil, errors.Wrap(err, "invalid endpoint_url") } - client := s3.NewFromConfig(cfg, func(options *s3.Options) { - if config.AccessKeyID != "" && config.SecretAccessKey != "" { - options.Credentials = credentials.NewStaticCredentialsProvider(config.AccessKeyID, config.SecretAccessKey, config.SessionToken) - } - if config.EndpointURL != "" { - options.UsePathStyle = config.UsePathStyle - options.BaseEndpoint = aws.String(config.EndpointURL) - } + + bucketLookup := minio.BucketLookupDNS + if config.UsePathStyle { + bucketLookup = minio.BucketLookupPath + } + + client, err := minio.New(parsedURL.Host, &minio.Options{ + Creds: credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, config.SessionToken), + Secure: parsedURL.Scheme == "https", + Region: config.Region, + BucketLookup: bucketLookup, }) + if err != nil { + return nil, errors.Wrap(err, "create minio client") + } - return &s3Client{ - Client: client, - Uploader: manager.NewUploader(client), + return &minioClient{ + client: client, bucket: config.Bucket, prefix: config.Prefix, blobsPrefix: config.BlobsPrefix, @@ -428,184 +416,90 @@ func newS3Client(ctx context.Context, config Config) (*s3Client, error) { }, nil } -func (s3Client *s3Client) getManifest(ctx context.Context, key string, config *cacheimporttypes.CacheConfig) (bool, error) { - input := &s3.GetObjectInput{ - Bucket: &s3Client.bucket, - Key: &key, - } - - output, err := s3Client.GetObject(ctx, input) +func (m *minioClient) getManifest(ctx context.Context, key string, config *cacheimporttypes.CacheConfig) (bool, error) { + object, err := m.client.GetObject(ctx, m.bucket, key, minio.GetObjectOptions{}) if err != nil { if isNotFound(err) { return false, nil } return false, err } - defer output.Body.Close() - - decoder := json.NewDecoder(output.Body) + defer object.Close() + decoder := json.NewDecoder(object) if err := decoder.Decode(config); err != nil { return false, errors.WithStack(err) } if _, err := decoder.Token(); !errors.Is(err, io.EOF) { return false, errors.Errorf("unexpected data after JSON object") } - return true, nil } -func (s3Client *s3Client) getReader(ctx context.Context, key string, offset int64) (io.ReadCloser, error) { - input := &s3.GetObjectInput{ - Bucket: &s3Client.bucket, - Key: &key, - } +func (m *minioClient) getReader(ctx context.Context, key string, offset int64) (io.ReadCloser, error) { + getOptions := minio.GetObjectOptions{} if offset > 0 { - input.Range = aws.String(fmt.Sprintf("bytes=%d-", offset)) + if err := getOptions.SetRange(offset, 0); err != nil { + return nil, err + } } - - output, err := s3Client.GetObject(ctx, input) + object, err := m.client.GetObject(ctx, m.bucket, key, getOptions) if err != nil { return nil, err } - return output.Body, nil + return object, nil } -func (s3Client *s3Client) saveMutableAt(ctx context.Context, key string, body io.Reader) error { - input := &s3.PutObjectInput{ - Bucket: &s3Client.bucket, - Key: &key, - Body: body, - } - _, err := s3Client.Upload(ctx, input) +func (m *minioClient) saveMutableAt(ctx context.Context, key string, body io.Reader, size int64) error { + _, err := m.client.PutObject(ctx, m.bucket, key, body, size, minio.PutObjectOptions{}) return err } -func (s3Client *s3Client) exists(ctx context.Context, key string) (*time.Time, *int64, error) { - input := &s3.HeadObjectInput{ - Bucket: &s3Client.bucket, - Key: &key, - } - - head, err := s3Client.HeadObject(ctx, input) +func (m *minioClient) exists(ctx context.Context, key string) (*time.Time, error) { + stat, err := m.client.StatObject(ctx, m.bucket, key, minio.StatObjectOptions{}) if err != nil { if isNotFound(err) { - return nil, nil, nil + return nil, nil } - return nil, nil, err + return nil, err } - return head.LastModified, head.ContentLength, nil + lastModified := stat.LastModified + return &lastModified, nil } -func buildCopySourceRange(start int64, objectSize int64) string { - end := start + maxCopyObjectSize - 1 - if end > objectSize { - end = objectSize - 1 +func (m *minioClient) touch(ctx context.Context, key string) error { + // Minio will internally perform multipart copy when supported + copySourceOptions := minio.CopySrcOptions{Bucket: m.bucket, Object: key} + copyDestinationOptions := minio.CopyDestOptions{ + Bucket: m.bucket, + Object: key, + ReplaceMetadata: true, + UserMetadata: map[string]string{"updated-at": time.Now().UTC().Format(time.RFC3339Nano)}, } - startRange := strconv.FormatInt(start, 10) - stopRange := strconv.FormatInt(end, 10) - return "bytes=" + startRange + "-" + stopRange -} - -func (s3Client *s3Client) touch(ctx context.Context, key string, size *int64) (err error) { - copySource := fmt.Sprintf("%s/%s", s3Client.bucket, key) - - // CopyObject does not support files > 5GB - if *size < maxCopyObjectSize { - cp := &s3.CopyObjectInput{ - Bucket: &s3Client.bucket, - CopySource: ©Source, - Key: &key, - Metadata: map[string]string{"updated-at": time.Now().String()}, - MetadataDirective: "REPLACE", - } - - _, err := s3Client.CopyObject(ctx, cp) - - return err - } - input := &s3.CreateMultipartUploadInput{ - Bucket: &s3Client.bucket, - Key: &key, - } - - output, err := s3Client.CreateMultipartUpload(ctx, input) - if err != nil { - return err - } - - defer func() { - abortIn := s3.AbortMultipartUploadInput{ - Bucket: &s3Client.bucket, - Key: &key, - UploadId: output.UploadId, - } - if err != nil { - s3Client.AbortMultipartUpload(ctx, &abortIn) - } - }() - - var currentPartNumber int32 = 1 - var currentPosition int64 - var completedParts []s3types.CompletedPart - - for currentPosition < *size { - copyRange := buildCopySourceRange(currentPosition, *size) - partInput := s3.UploadPartCopyInput{ - Bucket: &s3Client.bucket, - CopySource: ©Source, - CopySourceRange: ©Range, - Key: &key, - PartNumber: ¤tPartNumber, - UploadId: output.UploadId, - } - uploadPartCopyResult, err := s3Client.UploadPartCopy(ctx, &partInput) - if err != nil { - return err - } - partNumber := new(int32) - *partNumber = currentPartNumber - completedParts = append(completedParts, s3types.CompletedPart{ - ETag: uploadPartCopyResult.CopyPartResult.ETag, - PartNumber: partNumber, - }) - - currentPartNumber++ - currentPosition += maxCopyObjectSize - } - - completeMultipartUploadInput := &s3.CompleteMultipartUploadInput{ - Bucket: &s3Client.bucket, - Key: &key, - UploadId: output.UploadId, - MultipartUpload: &s3types.CompletedMultipartUpload{ - Parts: completedParts, - }, - } - - if _, err := s3Client.CompleteMultipartUpload(ctx, completeMultipartUploadInput); err != nil { - return err - } - - return nil + _, err := m.client.CopyObject(ctx, copyDestinationOptions, copySourceOptions) + return err } -func (s3Client *s3Client) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { +func (m *minioClient) ReaderAt(ctx context.Context, descriptor ocispecs.Descriptor) (content.ReaderAt, error) { readerAtCloser := toReaderAtCloser(func(offset int64) (io.ReadCloser, error) { - return s3Client.getReader(ctx, s3Client.blobKey(desc.Digest), offset) + return m.getReader(ctx, m.blobKey(descriptor.Digest), offset) }) - return &readerAt{ReaderAtCloser: readerAtCloser, size: desc.Size}, nil + return &readerAt{ReaderAtCloser: readerAtCloser, size: descriptor.Size}, nil } -func (s3Client *s3Client) manifestKey(name string) string { - return s3Client.prefix + s3Client.manifestsPrefix + name -} +func (m *minioClient) manifestKey(name string) string { return m.prefix + m.manifestsPrefix + name } -func (s3Client *s3Client) blobKey(dgst digest.Digest) string { - return s3Client.prefix + s3Client.blobsPrefix + dgst.String() +func (m *minioClient) blobKey(digestValue digest.Digest) string { + return m.prefix + m.blobsPrefix + digestValue.String() } func isNotFound(err error) bool { - var nf *s3types.NotFound - var nsk *s3types.NoSuchKey - return errors.As(err, &nf) || errors.As(err, &nsk) + resp := minio.ToErrorResponse(err) + if resp.StatusCode == http.StatusNotFound { + return true + } + switch strings.ToLower(resp.Code) { + case "nosuchkey", "notfound", "no such key": + return true + } + return false } diff --git a/go.mod b/go.mod index 96906ac40cd7..0f6a39a79050 100644 --- a/go.mod +++ b/go.mod @@ -11,11 +11,6 @@ require ( github.com/ProtonMail/go-crypto v1.3.0 github.com/agext/levenshtein v1.2.3 github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 - github.com/aws/aws-sdk-go-v2 v1.38.1 - github.com/aws/aws-sdk-go-v2/config v1.31.3 - github.com/aws/aws-sdk-go-v2/credentials v1.18.7 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 - github.com/aws/aws-sdk-go-v2/service/s3 v1.87.1 github.com/cespare/xxhash/v2 v2.3.0 github.com/containerd/accelerated-container-image v1.3.0 github.com/containerd/console v1.0.5 @@ -47,6 +42,7 @@ require ( github.com/hiddeco/sshsig v0.2.0 github.com/in-toto/in-toto-golang v0.9.0 github.com/klauspost/compress v1.18.1 + github.com/minio/minio-go/v7 v7.0.97 github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/moby/docker-image-spec v1.3.1 github.com/moby/go-archive v0.1.0 @@ -118,20 +114,6 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.28.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 // indirect - github.com/aws/smithy-go v1.22.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cloudflare/circl v1.6.0 // indirect @@ -146,10 +128,11 @@ require ( github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/fatih/color v1.18.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -160,26 +143,33 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hanwen/go-fuse/v2 v2.8.0 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.11 // indirect + github.com/klauspost/crc32 v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect + github.com/minio/crc64nvme v1.1.0 // indirect + github.com/minio/md5-simd v1.1.2 // indirect github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mount v0.3.4 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 // indirect github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect + github.com/philhofer/fwd v1.2.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect + github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/tinylib/msgp v1.3.0 // indirect github.com/vbatts/tar-split v0.12.1 // indirect github.com/vishvananda/netns v0.0.5 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/metric v1.38.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect golang.org/x/text v0.29.0 // indirect diff --git a/go.sum b/go.sum index dc725a7a0077..dd48949cf9ed 100644 --- a/go.sum +++ b/go.sum @@ -35,44 +35,6 @@ github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1 github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/aws/aws-sdk-go-v2 v1.38.1 h1:j7sc33amE74Rz0M/PoCpsZQ6OunLqys/m5antM0J+Z8= -github.com/aws/aws-sdk-go-v2 v1.38.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg= -github.com/aws/aws-sdk-go-v2/config v1.31.3 h1:RIb3yr/+PZ18YYNe6MDiG/3jVoJrPmdoCARwNkMGvco= -github.com/aws/aws-sdk-go-v2/config v1.31.3/go.mod h1:jjgx1n7x0FAKl6TnakqrpkHWWKcX3xfWtdnIJs5K9CE= -github.com/aws/aws-sdk-go-v2/credentials v1.18.7 h1:zqg4OMrKj+t5HlswDApgvAHjxKtlduKS7KicXB+7RLg= -github.com/aws/aws-sdk-go-v2/credentials v1.18.7/go.mod h1:/4M5OidTskkgkv+nCIfC9/tbiQ/c8qTox9QcUDV0cgc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.4 h1:lpdMwTzmuDLkgW7086jE94HweHCqG+uOJwHf3LZs7T0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.4/go.mod h1:9xzb8/SV62W6gHQGC/8rrvgNXU6ZoYM3sAIJCIrXJxY= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 h1:zeN9UtUlA6FTx0vFSayxSX32HDw73Yb6Hh2izDSFxXY= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10/go.mod h1:3HKuexPDcwLWPaqpW2UR/9n8N/u/3CKcGAzSs8p8u8g= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4 h1:IdCLsiiIj5YJ3AFevsewURCPV+YWUlOW8JiPhoAy8vg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4/go.mod h1:l4bdfCD7XyyZA9BolKBo1eLqgaJxl0/x91PL4Yqe0ao= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4 h1:j7vjtr1YIssWQOMeOWRbh3z8g2oY/xPjnZH2gLY4sGw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4/go.mod h1:yDmJgqOiH4EA8Hndnv4KwAo8jCGTSnM5ASG1nBI+toA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.4 h1:BE/MNQ86yzTINrfxPPFS86QCBNQeLKY2A0KhDh47+wI= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.4/go.mod h1:SPBBhkJxjcrzJBc+qY85e83MQ2q3qdra8fghhkkyrJg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.4 h1:Beh9oVgtQnBgR4sKKzkUBRQpf1GnL4wt0l4s8h2VCJ0= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.4/go.mod h1:b17At0o8inygF+c6FOD3rNyYZufPw62o9XJbSfQPgbo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4 h1:ueB2Te0NacDMnaC+68za9jLwkjzxGWm0KB5HTUHjLTI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4/go.mod h1:nLEfLnVMmLvyIG58/6gsSA03F1voKGaCfHV7+lR8S7s= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.4 h1:HVSeukL40rHclNcUqVcBwE1YoZhOkoLeBfhUqR3tjIU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.4/go.mod h1:DnbBOv4FlIXHj2/xmrUQYtawRFC9L9ZmQPz+DBc6X5I= -github.com/aws/aws-sdk-go-v2/service/s3 v1.87.1 h1:2n6Pd67eJwAb/5KCX62/8RTU0aFAAW7V5XIGSghiHrw= -github.com/aws/aws-sdk-go-v2/service/s3 v1.87.1/go.mod h1:w5PC+6GHLkvMJKasYGVloB3TduOtROEMqm15HSuIbw4= -github.com/aws/aws-sdk-go-v2/service/sso v1.28.2 h1:ve9dYBB8CfJGTFqcQ3ZLAAb/KXWgYlgu/2R2TZL2Ko0= -github.com/aws/aws-sdk-go-v2/service/sso v1.28.2/go.mod h1:n9bTZFZcBa9hGGqVz3i/a6+NG0zmZgtkB9qVVFDqPA8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.0 h1:Bnr+fXrlrPEoR1MAFrHVsge3M/WoK4n23VNhRM7TPHI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.0/go.mod h1:eknndR9rU8UpE/OmFpqU78V1EcXPKFTTm5l/buZYgvM= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.0 h1:iV1Ko4Em/lkJIsoKyGfc0nQySi+v0Udxr6Igq+y9JZc= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.0/go.mod h1:bEPcjW7IbolPfK67G1nilqWyoxYMSPrDiIQ3RdIdKgo= -github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= -github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -157,18 +119,22 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -257,6 +223,11 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU= +github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM= +github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -270,6 +241,12 @@ github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stg github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q= +github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ= +github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -329,6 +306,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= +github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -359,8 +338,10 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= @@ -394,6 +375,8 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 h1:r0p7fK56l8WPequOaR3i9LBqfPtEdXIQbUTzT55iqT4= github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323/go.mod h1:3Iuxbr0P7D3zUzBMAZB+ois3h/et0shEz0qApgHYGpY= github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f h1:MoxeMfHAe5Qj/ySSBfL8A7l1V+hxuluj8owsIEEZipI= @@ -428,8 +411,8 @@ go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 h1:lREC4C0ilyP4WibDhQ7Gg2ygAQFP8oR07Fst/5cafwI= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt deleted file mode 100644 index 899129ecc465..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go deleted file mode 100644 index 6504a21864cb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go +++ /dev/null @@ -1,18 +0,0 @@ -package aws - -// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing. -type AccountIDEndpointMode string - -const ( - // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing - AccountIDEndpointModeUnset AccountIDEndpointMode = "" - - // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present - AccountIDEndpointModePreferred = "preferred" - - // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity - AccountIDEndpointModeRequired = "required" - - // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing - AccountIDEndpointModeDisabled = "disabled" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go deleted file mode 100644 index fe63fedadd68..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package arn provides a parser for interacting with Amazon Resource Names. -package arn - -import ( - "errors" - "strings" -) - -const ( - arnDelimiter = ":" - arnSections = 6 - arnPrefix = "arn:" - - // zero-indexed - sectionPartition = 1 - sectionService = 2 - sectionRegion = 3 - sectionAccountID = 4 - sectionResource = 5 - - // errors - invalidPrefix = "arn: invalid prefix" - invalidSections = "arn: not enough sections" -) - -// ARN captures the individual fields of an Amazon Resource Name. -// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. -type ARN struct { - // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in - // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China - // (Beijing) region is "aws-cn". - Partition string - - // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of - // namespaces, see - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. - Service string - - // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this - // component might be omitted. - Region string - - // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the - // ARNs for some resources don't require an account number, so this component might be omitted. - AccountID string - - // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — - // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the - // resource name itself. Some services allows paths for resource names, as described in - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. - Resource string -} - -// Parse parses an ARN into its constituent parts. -// -// Some example ARNs: -// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment -// arn:aws:iam::123456789012:user/David -// arn:aws:rds:eu-west-1:123456789012:db:mysql-db -// arn:aws:s3:::my_corporate_bucket/exampleobject.png -func Parse(arn string) (ARN, error) { - if !strings.HasPrefix(arn, arnPrefix) { - return ARN{}, errors.New(invalidPrefix) - } - sections := strings.SplitN(arn, arnDelimiter, arnSections) - if len(sections) != arnSections { - return ARN{}, errors.New(invalidSections) - } - return ARN{ - Partition: sections[sectionPartition], - Service: sections[sectionService], - Region: sections[sectionRegion], - AccountID: sections[sectionAccountID], - Resource: sections[sectionResource], - }, nil -} - -// IsARN returns whether the given string is an arn -// by looking for whether the string starts with arn: -func IsARN(arn string) bool { - return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 -} - -// String returns the canonical representation of the ARN -func (arn ARN) String() string { - return arnPrefix + - arn.Partition + arnDelimiter + - arn.Service + arnDelimiter + - arn.Region + arnDelimiter + - arn.AccountID + arnDelimiter + - arn.Resource -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go deleted file mode 100644 index 4152caade10e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -// RequestChecksumCalculation controls request checksum calculation workflow -type RequestChecksumCalculation int - -const ( - // RequestChecksumCalculationUnset is the unset value for RequestChecksumCalculation - RequestChecksumCalculationUnset RequestChecksumCalculation = iota - - // RequestChecksumCalculationWhenSupported indicates request checksum will be calculated - // if the operation supports input checksums - RequestChecksumCalculationWhenSupported - - // RequestChecksumCalculationWhenRequired indicates request checksum will be calculated - // if required by the operation or if user elects to set a checksum algorithm in request - RequestChecksumCalculationWhenRequired -) - -// ResponseChecksumValidation controls response checksum validation workflow -type ResponseChecksumValidation int - -const ( - // ResponseChecksumValidationUnset is the unset value for ResponseChecksumValidation - ResponseChecksumValidationUnset ResponseChecksumValidation = iota - - // ResponseChecksumValidationWhenSupported indicates response checksum will be validated - // if the operation supports output checksums - ResponseChecksumValidationWhenSupported - - // ResponseChecksumValidationWhenRequired indicates response checksum will only - // be validated if the operation requires output checksum validation - ResponseChecksumValidationWhenRequired -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go deleted file mode 100644 index 3219517dabc5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ /dev/null @@ -1,250 +0,0 @@ -package aws - -import ( - "net/http" - - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// HTTPClient provides the interface to provide custom HTTPClients. Generally -// *http.Client is sufficient for most use cases. The HTTPClient should not -// follow 301 or 302 redirects. -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// A Config provides service configuration for service clients. -type Config struct { - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for - // information on AWS regions. - Region string - - // The credentials object to use when signing requests. - // Use the LoadDefaultConfig to load configuration from all the SDK's supported - // sources, and resolve credentials using the SDK's default credential chain. - Credentials CredentialsProvider - - // The Bearer Authentication token provider to use for authenticating API - // operation calls with a Bearer Authentication token. The API clients and - // operation must support Bearer Authentication scheme in order for the - // token provider to be used. API clients created with NewFromConfig will - // automatically be configured with this option, if the API client support - // Bearer Authentication. - // - // The SDK's config.LoadDefaultConfig can automatically populate this - // option for external configuration options such as SSO session. - // https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html - BearerAuthTokenProvider smithybearer.TokenProvider - - // The HTTP Client the SDK's API clients will use to invoke HTTP requests. - // The SDK defaults to a BuildableClient allowing API clients to create - // copies of the HTTP Client for service specific customizations. - // - // Use a (*http.Client) for custom behavior. Using a custom http.Client - // will prevent the SDK from modifying the HTTP client. - HTTPClient HTTPClient - - // An endpoint resolver that can be used to provide or override an endpoint - // for the given service and region. - // - // See the `aws.EndpointResolver` documentation for additional usage - // information. - // - // Deprecated: See Config.EndpointResolverWithOptions - EndpointResolver EndpointResolver - - // An endpoint resolver that can be used to provide or override an endpoint - // for the given service and region. - // - // When EndpointResolverWithOptions is specified, it will be used by a - // service client rather than using EndpointResolver if also specified. - // - // See the `aws.EndpointResolverWithOptions` documentation for additional - // usage information. - // - // Deprecated: with the release of endpoint resolution v2 in API clients, - // EndpointResolver and EndpointResolverWithOptions are deprecated. - // Providing a value for this field will likely prevent you from using - // newer endpoint-related service features. See API client options - // EndpointResolverV2 and BaseEndpoint. - EndpointResolverWithOptions EndpointResolverWithOptions - - // RetryMaxAttempts specifies the maximum number attempts an API client - // will call an operation that fails with a retryable error. - // - // API Clients will only use this value to construct a retryer if the - // Config.Retryer member is not nil. This value will be ignored if - // Retryer is not nil. - RetryMaxAttempts int - - // RetryMode specifies the retry model the API client will be created with. - // - // API Clients will only use this value to construct a retryer if the - // Config.Retryer member is not nil. This value will be ignored if - // Retryer is not nil. - RetryMode RetryMode - - // Retryer is a function that provides a Retryer implementation. A Retryer - // guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - // - // In general, the provider function should return a new instance of a - // Retryer if you are attempting to provide a consistent Retryer - // configuration across all clients. This will ensure that each client will - // be provided a new instance of the Retryer implementation, and will avoid - // issues such as sharing the same retry token bucket across services. - // - // If not nil, RetryMaxAttempts, and RetryMode will be ignored by API - // clients. - Retryer func() Retryer - - // ConfigSources are the sources that were used to construct the Config. - // Allows for additional configuration to be loaded by clients. - ConfigSources []interface{} - - // APIOptions provides the set of middleware mutations modify how the API - // client requests will be handled. This is useful for adding additional - // tracing data to a request, or changing behavior of the SDK's client. - APIOptions []func(*middleware.Stack) error - - // The logger writer interface to write logging messages to. Defaults to - // standard error. - Logger logging.Logger - - // Configures the events that will be sent to the configured logger. This - // can be used to configure the logging of signing, retries, request, and - // responses of the SDK clients. - // - // See the ClientLogMode type documentation for the complete set of logging - // modes and available configuration. - ClientLogMode ClientLogMode - - // The configured DefaultsMode. If not specified, service clients will - // default to legacy. - // - // Supported modes are: auto, cross-region, in-region, legacy, mobile, - // standard - DefaultsMode DefaultsMode - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode - // is set to DefaultsModeAuto and is initialized by - // `config.LoadDefaultConfig`. You should not populate this structure - // programmatically, or rely on the values here within your applications. - RuntimeEnvironment RuntimeEnvironment - - // AppId is an optional application specific identifier that can be set. - // When set it will be appended to the User-Agent header of every request - // in the form of App/{AppId}. This variable is sourced from environment - // variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id. - // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for - // more information on environment variables and shared config settings. - AppID string - - // BaseEndpoint is an intermediary transfer location to a service specific - // BaseEndpoint on a service's Options. - BaseEndpoint *string - - // DisableRequestCompression toggles if an operation request could be - // compressed or not. Will be set to false by default. This variable is sourced from - // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute - // disable_request_compression - DisableRequestCompression bool - - // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be - // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively. - // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or - // the shared config profile attribute request_min_compression_size_bytes - RequestMinCompressSizeBytes int64 - - // Controls how a resolved AWS account ID is handled for endpoint routing. - AccountIDEndpointMode AccountIDEndpointMode - - // RequestChecksumCalculation determines when request checksum calculation is performed. - // - // There are two possible values for this setting: - // - // 1. RequestChecksumCalculationWhenSupported (default): The checksum is always calculated - // if the operation supports it, regardless of whether the user sets an algorithm in the request. - // - // 2. RequestChecksumCalculationWhenRequired: The checksum is only calculated if the user - // explicitly sets a checksum algorithm in the request. - // - // This setting is sourced from the environment variable AWS_REQUEST_CHECKSUM_CALCULATION - // or the shared config profile attribute "request_checksum_calculation". - RequestChecksumCalculation RequestChecksumCalculation - - // ResponseChecksumValidation determines when response checksum validation is performed - // - // There are two possible values for this setting: - // - // 1. ResponseChecksumValidationWhenSupported (default): The checksum is always validated - // if the operation supports it, regardless of whether the user sets the validation mode to ENABLED in request. - // - // 2. ResponseChecksumValidationWhenRequired: The checksum is only validated if the user - // explicitly sets the validation mode to ENABLED in the request - // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or - // the shared config profile attribute "response_checksum_validation". - ResponseChecksumValidation ResponseChecksumValidation - - // Registry of HTTP interceptors. - Interceptors smithyhttp.InterceptorRegistry - - // Priority list of preferred auth scheme IDs. - AuthSchemePreference []string - - // ServiceOptions provides service specific configuration options that will be applied - // when constructing clients for specific services. Each callback function receives the service ID - // and the service's Options struct, allowing for dynamic configuration based on the service. - ServiceOptions []func(string, any) -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -func NewConfig() *Config { - return &Config{} -} - -// Copy will return a shallow copy of the Config object. -func (c Config) Copy() Config { - cp := c - return cp -} - -// EndpointDiscoveryEnableState indicates if endpoint discovery is -// enabled, disabled, auto or unset state. -// -// Default behavior (Auto or Unset) indicates operations that require endpoint -// discovery will use Endpoint Discovery by default. Operations that -// optionally use Endpoint Discovery will not use Endpoint Discovery -// unless EndpointDiscovery is explicitly enabled. -type EndpointDiscoveryEnableState uint - -// Enumeration values for EndpointDiscoveryEnableState -const ( - // EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset. - // Users do not need to use this value explicitly. The behavior for unset - // is the same as for EndpointDiscoveryAuto. - EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota - - // EndpointDiscoveryAuto represents an AUTO state that allows endpoint - // discovery only when required by the api. This is the default - // configuration resolved by the client if endpoint discovery is neither - // enabled or disabled. - EndpointDiscoveryAuto // default state - - // EndpointDiscoveryDisabled indicates client MUST not perform endpoint - // discovery even when required. - EndpointDiscoveryDisabled - - // EndpointDiscoveryEnabled indicates client MUST always perform endpoint - // discovery if supported for the operation. - EndpointDiscoveryEnabled -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go deleted file mode 100644 index 4d8e26ef3215..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go +++ /dev/null @@ -1,22 +0,0 @@ -package aws - -import ( - "context" - "time" -) - -type suppressedContext struct { - context.Context -} - -func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (s *suppressedContext) Done() <-chan struct{} { - return nil -} - -func (s *suppressedContext) Err() error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go deleted file mode 100644 index 623890e8d801..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ /dev/null @@ -1,235 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" -) - -// CredentialsCacheOptions are the options -type CredentialsCacheOptions struct { - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // An ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. This can cause an - // increased number of requests to refresh the credentials to occur. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // ExpiryWindowJitterFrac provides a mechanism for randomizing the - // expiration of credentials within the configured ExpiryWindow by a random - // percentage. Valid values are between 0.0 and 1.0. - // - // As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac - // is 0.5 then credentials will be set to expire between 30 to 60 seconds - // prior to their actual expiration time. - // - // If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored. - // If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window. - // If ExpiryWindowJitterFrac < 0 the value will be treated as 0. - // If ExpiryWindowJitterFrac > 1 the value will be treated as 1. - ExpiryWindowJitterFrac float64 -} - -// CredentialsCache provides caching and concurrency safe credentials retrieval -// via the provider's retrieve method. -// -// CredentialsCache will look for optional interfaces on the Provider to adjust -// how the credential cache handles credentials caching. -// -// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle -// credential refresh failures. This could return an updated Credentials -// value, or attempt another means of retrieving credentials. -// -// - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how -// credentials Expires is modified. This could modify how the Credentials -// Expires is adjusted based on the CredentialsCache ExpiryWindow option. -// Such as providing a floor not to reduce the Expires below. -type CredentialsCache struct { - provider CredentialsProvider - - options CredentialsCacheOptions - creds atomic.Value - sf singleflight.Group -} - -// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider -// is expected to not be nil. A variadic list of one or more functions can be -// provided to modify the CredentialsCache configuration. This allows for -// configuration of credential expiry window and jitter. -func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache { - options := CredentialsCacheOptions{} - - for _, fn := range optFns { - fn(&options) - } - - if options.ExpiryWindow < 0 { - options.ExpiryWindow = 0 - } - - if options.ExpiryWindowJitterFrac < 0 { - options.ExpiryWindowJitterFrac = 0 - } else if options.ExpiryWindowJitterFrac > 1 { - options.ExpiryWindowJitterFrac = 1 - } - - return &CredentialsCache{ - provider: provider, - options: options, - } -} - -// Retrieve returns the credentials. If the credentials have already been -// retrieved, and not expired the cached credentials will be returned. If the -// credentials have not been retrieved yet, or expired the provider's Retrieve -// method will be called. -// -// Returns and error if the provider's retrieve method returns an error. -func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) { - if creds, ok := p.getCreds(); ok && !creds.Expired() { - return creds, nil - } - - resCh := p.sf.DoChan("", func() (interface{}, error) { - return p.singleRetrieve(&suppressedContext{ctx}) - }) - select { - case res := <-resCh: - return res.Val.(Credentials), res.Err - case <-ctx.Done(): - return Credentials{}, &RequestCanceledError{Err: ctx.Err()} - } -} - -func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) { - currCreds, ok := p.getCreds() - if ok && !currCreds.Expired() { - return currCreds, nil - } - - newCreds, err := p.provider.Retrieve(ctx) - if err != nil { - handleFailToRefresh := defaultHandleFailToRefresh - if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok { - handleFailToRefresh = cs.HandleFailToRefresh - } - newCreds, err = handleFailToRefresh(ctx, currCreds, err) - if err != nil { - return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err) - } - } - - if newCreds.CanExpire && p.options.ExpiryWindow > 0 { - adjustExpiresBy := defaultAdjustExpiresBy - if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok { - adjustExpiresBy = cs.AdjustExpiresBy - } - - randFloat64, err := sdkrand.CryptoRandFloat64() - if err != nil { - return Credentials{}, fmt.Errorf("failed to get random provider, %w", err) - } - - var jitter time.Duration - if p.options.ExpiryWindowJitterFrac > 0 { - jitter = time.Duration(randFloat64 * - p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow)) - } - - newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter)) - if err != nil { - return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err) - } - } - - p.creds.Store(&newCreds) - return newCreds, nil -} - -// getCreds returns the currently stored credentials and true. Returning false -// if no credentials were stored. -func (p *CredentialsCache) getCreds() (Credentials, bool) { - v := p.creds.Load() - if v == nil { - return Credentials{}, false - } - - c := v.(*Credentials) - if c == nil || !c.HasKeys() { - return Credentials{}, false - } - - return *c, true -} - -// ProviderSources returns a list of where the underlying credential provider -// has been sourced, if available. Returns empty if the provider doesn't implement -// the interface -func (p *CredentialsCache) ProviderSources() []CredentialSource { - asSource, ok := p.provider.(CredentialProviderSource) - if !ok { - return []CredentialSource{} - } - return asSource.ProviderSources() -} - -// Invalidate will invalidate the cached credentials. The next call to Retrieve -// will cause the provider's Retrieve method to be called. -func (p *CredentialsCache) Invalidate() { - p.creds.Store((*Credentials)(nil)) -} - -// IsCredentialsProvider returns whether credential provider wrapped by CredentialsCache -// matches the target provider type. -func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool { - return IsCredentialsProvider(p.provider, target) -} - -// HandleFailRefreshCredentialsCacheStrategy is an interface for -// CredentialsCache to allow CredentialsProvider how failed to refresh -// credentials is handled. -type HandleFailRefreshCredentialsCacheStrategy interface { - // Given the previously cached Credentials, if any, and refresh error, may - // returns new or modified set of Credentials, or error. - // - // Credential caches may use default implementation if nil. - HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error) -} - -// defaultHandleFailToRefresh returns the passed in error. -func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) { - return Credentials{}, err -} - -// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialCache -// to allow CredentialsProvider to intercept adjustments to Credentials expiry -// based on expectations and use cases of CredentialsProvider. -// -// Credential caches may use default implementation if nil. -type AdjustExpiresByCredentialsCacheStrategy interface { - // Given a Credentials as input, applying any mutations and - // returning the potentially updated Credentials, or error. - AdjustExpiresBy(Credentials, time.Duration) (Credentials, error) -} - -// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires, -// and returns the updated credentials value. If Credentials value's CanExpire -// is false, the passed in credentials are returned unchanged. -func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) { - if !creds.CanExpire { - return creds, nil - } - - creds.Expires = creds.Expires.Add(dur) - return creds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go deleted file mode 100644 index 4ad2ee4405d6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ /dev/null @@ -1,230 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// AnonymousCredentials provides a sentinel CredentialsProvider that should be -// used to instruct the SDK's signing middleware to not sign the request. -// -// Using `nil` credentials when configuring an API client will achieve the same -// result. The AnonymousCredentials type allows you to configure the SDK's -// external config loading to not attempt to source credentials from the shared -// config or environment. -// -// For example you can use this CredentialsProvider with an API client's -// Options to instruct the client not to sign a request for accessing public -// S3 bucket objects. -// -// The following example demonstrates using the AnonymousCredentials to prevent -// SDK's external config loading attempt to resolve credentials. -// -// cfg, err := config.LoadDefaultConfig(context.TODO(), -// config.WithCredentialsProvider(aws.AnonymousCredentials{}), -// ) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } -// -// client := s3.NewFromConfig(cfg) -// -// Alternatively you can leave the API client Option's `Credential` member to -// nil. If using the `NewFromConfig` constructor you'll need to explicitly set -// the `Credentials` member to nil, if the external config resolved a -// credential provider. -// -// client := s3.New(s3.Options{ -// // Credentials defaults to a nil value. -// }) -// -// This can also be configured for specific operations calls too. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } -// -// client := s3.NewFromConfig(config) -// -// result, err := client.GetObject(context.TODO(), s3.GetObject{ -// Bucket: aws.String("example-bucket"), -// Key: aws.String("example-key"), -// }, func(o *s3.Options) { -// o.Credentials = nil -// // Or -// o.Credentials = aws.AnonymousCredentials{} -// }) -type AnonymousCredentials struct{} - -// Retrieve implements the CredentialsProvider interface, but will always -// return error, and cannot be used to sign a request. The AnonymousCredentials -// type is used as a sentinel type instructing the AWS request signing -// middleware to not sign a request. -func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) { - return Credentials{Source: "AnonymousCredentials"}, - fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with") -} - -// CredentialSource is the source of the credential provider. -// A provider can have multiple credential sources: For example, a provider that reads a profile, calls ECS to -// get credentials and then assumes a role using STS will have all these as part of its provider chain. -type CredentialSource int - -const ( - // CredentialSourceUndefined is the sentinel zero value - CredentialSourceUndefined CredentialSource = iota - // CredentialSourceCode credentials resolved from code, cli parameters, session object, or client instance - CredentialSourceCode - // CredentialSourceEnvVars credentials resolved from environment variables - CredentialSourceEnvVars - // CredentialSourceEnvVarsSTSWebIDToken credentials resolved from environment variables for assuming a role with STS using a web identity token - CredentialSourceEnvVarsSTSWebIDToken - // CredentialSourceSTSAssumeRole credentials resolved from STS using AssumeRole - CredentialSourceSTSAssumeRole - // CredentialSourceSTSAssumeRoleSaml credentials resolved from STS using assume role with SAML - CredentialSourceSTSAssumeRoleSaml - // CredentialSourceSTSAssumeRoleWebID credentials resolved from STS using assume role with web identity - CredentialSourceSTSAssumeRoleWebID - // CredentialSourceSTSFederationToken credentials resolved from STS using a federation token - CredentialSourceSTSFederationToken - // CredentialSourceSTSSessionToken credentials resolved from STS using a session token S - CredentialSourceSTSSessionToken - // CredentialSourceProfile credentials resolved from a config file(s) profile with static credentials - CredentialSourceProfile - // CredentialSourceProfileSourceProfile credentials resolved from a source profile in a config file(s) profile - CredentialSourceProfileSourceProfile - // CredentialSourceProfileNamedProvider credentials resolved from a named provider in a config file(s) profile (like EcsContainer) - CredentialSourceProfileNamedProvider - // CredentialSourceProfileSTSWebIDToken credentials resolved from configuration for assuming a role with STS using web identity token in a config file(s) profile - CredentialSourceProfileSTSWebIDToken - // CredentialSourceProfileSSO credentials resolved from an SSO session in a config file(s) profile - CredentialSourceProfileSSO - // CredentialSourceSSO credentials resolved from an SSO session - CredentialSourceSSO - // CredentialSourceProfileSSOLegacy credentials resolved from an SSO session in a config file(s) profile using legacy format - CredentialSourceProfileSSOLegacy - // CredentialSourceSSOLegacy credentials resolved from an SSO session using legacy format - CredentialSourceSSOLegacy - // CredentialSourceProfileProcess credentials resolved from a process in a config file(s) profile - CredentialSourceProfileProcess - // CredentialSourceProcess credentials resolved from a process - CredentialSourceProcess - // CredentialSourceHTTP credentials resolved from an HTTP endpoint - CredentialSourceHTTP - // CredentialSourceIMDS credentials resolved from the instance metadata service (IMDS) - CredentialSourceIMDS -) - -// A Credentials is the AWS credentials value for individual credential fields. -type Credentials struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Source of the credentials - Source string - - // States if the credentials can expire or not. - CanExpire bool - - // The time the credentials will expire at. Should be ignored if CanExpire - // is false. - Expires time.Time - - // The ID of the account for the credentials. - AccountID string -} - -// Expired returns if the credentials have expired. -func (v Credentials) Expired() bool { - if v.CanExpire { - // Calling Round(0) on the current time will truncate the monotonic - // reading only. Ensures credential expiry time is always based on - // reported wall-clock time. - return !v.Expires.After(sdk.NowTime().Round(0)) - } - - return false -} - -// HasKeys returns if the credentials keys are set. -func (v Credentials) HasKeys() bool { - return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0 -} - -// A CredentialsProvider is the interface for any component which will provide -// credentials Credentials. A CredentialsProvider is required to manage its own -// Expired state, and what to be expired means. -// -// A credentials provider implementation can be wrapped with a CredentialCache -// to cache the credential value retrieved. Without the cache the SDK will -// attempt to retrieve the credentials for every request. -type CredentialsProvider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve(ctx context.Context) (Credentials, error) -} - -// CredentialProviderSource allows any credential provider to track -// all providers where a credential provider were sourced. For example, if the credentials came from a -// call to a role specified in the profile, this method will give the whole breadcrumb trail -type CredentialProviderSource interface { - ProviderSources() []CredentialSource -} - -// CredentialsProviderFunc provides a helper wrapping a function value to -// satisfy the CredentialsProvider interface. -type CredentialsProviderFunc func(context.Context) (Credentials, error) - -// Retrieve delegates to the function value the CredentialsProviderFunc wraps. -func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) { - return fn(ctx) -} - -type isCredentialsProvider interface { - IsCredentialsProvider(CredentialsProvider) bool -} - -// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the -// implementation type. -// -// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating -// whether target matches the credential provider type. -// -// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used: -// -// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false -// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false -// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false -// If provider is of type *T and target is of type *V,true if type *T is the same as type *V, otherwise false -func IsCredentialsProvider(provider, target CredentialsProvider) bool { - if target == nil || provider == nil { - return provider == target - } - - if x, ok := provider.(isCredentialsProvider); ok { - return x.IsCredentialsProvider(target) - } - - targetType := reflect.TypeOf(target) - if targetType.Kind() != reflect.Ptr { - targetType = reflect.PtrTo(targetType) - } - - providerType := reflect.TypeOf(provider) - if providerType.Kind() != reflect.Ptr { - providerType = reflect.PtrTo(providerType) - } - - return targetType.AssignableTo(providerType) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go deleted file mode 100644 index fd408e518600..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go +++ /dev/null @@ -1,38 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "runtime" - "strings" -) - -var getGOOS = func() string { - return runtime.GOOS -} - -// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode -// is set to aws.DefaultsModeAuto. -func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode { - goos := getGOOS() - if goos == "android" || goos == "ios" { - return aws.DefaultsModeMobile - } - - var currentRegion string - if len(environment.EnvironmentIdentifier) > 0 { - currentRegion = environment.Region - } - - if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 { - currentRegion = environment.EC2InstanceMetadataRegion - } - - if len(region) > 0 && len(currentRegion) > 0 { - if strings.EqualFold(region, currentRegion) { - return aws.DefaultsModeInRegion - } - return aws.DefaultsModeCrossRegion - } - - return aws.DefaultsModeStandard -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go deleted file mode 100644 index 8b7e01fa29a8..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go +++ /dev/null @@ -1,43 +0,0 @@ -package defaults - -import ( - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// Configuration is the set of SDK configuration options that are determined based -// on the configured DefaultsMode. -type Configuration struct { - // RetryMode is the configuration's default retry mode API clients should - // use for constructing a Retryer. - RetryMode aws.RetryMode - - // ConnectTimeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // See https://pkg.go.dev/net#Dialer.Timeout - ConnectTimeout *time.Duration - - // TLSNegotiationTimeout specifies the maximum amount of time waiting to - // wait for a TLS handshake. - // - // See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout - TLSNegotiationTimeout *time.Duration -} - -// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set. -func (c *Configuration) GetConnectTimeout() (time.Duration, bool) { - if c.ConnectTimeout == nil { - return 0, false - } - return *c.ConnectTimeout, true -} - -// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set. -func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) { - if c.TLSNegotiationTimeout == nil { - return 0, false - } - return *c.TLSNegotiationTimeout, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go deleted file mode 100644 index dbaa873dc899..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT. - -package defaults - -import ( - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "time" -) - -// GetModeConfiguration returns the default Configuration descriptor for the given mode. -// -// Supports the following modes: cross-region, in-region, mobile, standard -func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) { - var mv aws.DefaultsMode - mv.SetFromString(string(mode)) - - switch mv { - case aws.DefaultsModeCrossRegion: - settings := Configuration{ - ConnectTimeout: aws.Duration(3100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeInRegion: - settings := Configuration{ - ConnectTimeout: aws.Duration(1100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeMobile: - settings := Configuration{ - ConnectTimeout: aws.Duration(30000 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeStandard: - settings := Configuration{ - ConnectTimeout: aws.Duration(3100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), - } - return settings, nil - default: - return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go deleted file mode 100644 index 2d90011b426f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package defaults provides recommended configuration values for AWS SDKs and CLIs. -package defaults diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go deleted file mode 100644 index fcf9387c281a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT. - -package aws - -import ( - "strings" -) - -// DefaultsMode is the SDK defaults mode setting. -type DefaultsMode string - -// The DefaultsMode constants. -const ( - // DefaultsModeAuto is an experimental mode that builds on the standard mode. - // The SDK will attempt to discover the execution environment to determine the - // appropriate settings automatically. - // - // Note that the auto detection is heuristics-based and does not guarantee 100% - // accuracy. STANDARD mode will be used if the execution environment cannot - // be determined. The auto detection might query EC2 Instance Metadata service - // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html), - // which might introduce latency. Therefore we recommend choosing an explicit - // defaults_mode instead if startup latency is critical to your application - DefaultsModeAuto DefaultsMode = "auto" - - // DefaultsModeCrossRegion builds on the standard mode and includes optimization - // tailored for applications which call AWS services in a different region - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeCrossRegion DefaultsMode = "cross-region" - - // DefaultsModeInRegion builds on the standard mode and includes optimization - // tailored for applications which call AWS services from within the same AWS - // region - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeInRegion DefaultsMode = "in-region" - - // DefaultsModeLegacy provides default settings that vary per SDK and were used - // prior to establishment of defaults_mode - DefaultsModeLegacy DefaultsMode = "legacy" - - // DefaultsModeMobile builds on the standard mode and includes optimization - // tailored for mobile applications - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeMobile DefaultsMode = "mobile" - - // DefaultsModeStandard provides the latest recommended default values that - // should be safe to run in most scenarios - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeStandard DefaultsMode = "standard" -) - -// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches -// the provided string when compared using EqualFold. If the value does not match a known -// constant it will be set to as-is and the function will return false. As a special case, if the -// provided value is a zero-length string, the mode will be set to LegacyDefaultsMode. -func (d *DefaultsMode) SetFromString(v string) (ok bool) { - switch { - case strings.EqualFold(v, string(DefaultsModeAuto)): - *d = DefaultsModeAuto - ok = true - case strings.EqualFold(v, string(DefaultsModeCrossRegion)): - *d = DefaultsModeCrossRegion - ok = true - case strings.EqualFold(v, string(DefaultsModeInRegion)): - *d = DefaultsModeInRegion - ok = true - case strings.EqualFold(v, string(DefaultsModeLegacy)): - *d = DefaultsModeLegacy - ok = true - case strings.EqualFold(v, string(DefaultsModeMobile)): - *d = DefaultsModeMobile - ok = true - case strings.EqualFold(v, string(DefaultsModeStandard)): - *d = DefaultsModeStandard - ok = true - case len(v) == 0: - *d = DefaultsModeLegacy - ok = true - default: - *d = DefaultsMode(v) - } - return ok -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go deleted file mode 100644 index d8b6e09e593d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// # Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.ToString(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.ToStringSlice(strPtrs) -// -// # SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws - -// generate.go uses a build tag of "ignore", go run doesn't need to specify -// this because go run ignores all build flags when running a go file directly. -//go:generate go run -tags codegen generate.go -//go:generate go run -tags codegen logging_generate.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go deleted file mode 100644 index 99edbf3ee634..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ /dev/null @@ -1,247 +0,0 @@ -package aws - -import ( - "fmt" -) - -// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior. -type DualStackEndpointState uint - -const ( - // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution. - DualStackEndpointStateUnset DualStackEndpointState = iota - - // DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints. - DualStackEndpointStateEnabled - - // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. - DualStackEndpointStateDisabled -) - -// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value. -// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState. -func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) { - type iface interface { - GetUseDualStackEndpoint() DualStackEndpointState - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetUseDualStackEndpoint() - found = true - break - } - } - return value, found -} - -// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. -type FIPSEndpointState uint - -const ( - // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. - FIPSEndpointStateUnset FIPSEndpointState = iota - - // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. - FIPSEndpointStateEnabled - - // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. - FIPSEndpointStateDisabled -) - -// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value. -// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState. -func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) { - type iface interface { - GetUseFIPSEndpoint() FIPSEndpointState - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetUseFIPSEndpoint() - found = true - break - } - } - return value, found -} - -// Endpoint represents the endpoint a service client should make API operation -// calls to. -// -// The SDK will automatically resolve these endpoints per API client using an -// internal endpoint resolvers. If you'd like to provide custom endpoint -// resolving behavior you can implement the EndpointResolver interface. -// -// Deprecated: This structure was used with the global [EndpointResolver] -// interface, which has been deprecated in favor of service-specific endpoint -// resolution. See the deprecation docs on that interface for more information. -type Endpoint struct { - // The base URL endpoint the SDK API clients will use to make API calls to. - // The SDK will suffix URI path and query elements to this endpoint. - URL string - - // Specifies if the endpoint's hostname can be modified by the SDK's API - // client. - // - // If the hostname is mutable the SDK API clients may modify any part of - // the hostname based on the requirements of the API, (e.g. adding, or - // removing content in the hostname). Such as, Amazon S3 API client - // prefixing "bucketname" to the hostname, or changing the - // hostname service name component from "s3." to "s3-accesspoint.dualstack." - // for the dualstack endpoint of an S3 Accesspoint resource. - // - // Care should be taken when providing a custom endpoint for an API. If the - // endpoint hostname is mutable, and the client cannot modify the endpoint - // correctly, the operation call will most likely fail, or have undefined - // behavior. - // - // If hostname is immutable, the SDK API clients will not modify the - // hostname of the URL. This may cause the API client not to function - // correctly if the API requires the operation specific hostname values - // to be used by the client. - // - // This flag does not modify the API client's behavior if this endpoint - // will be used instead of Endpoint Discovery, or if the endpoint will be - // used to perform Endpoint Discovery. That behavior is configured via the - // API Client's Options. - HostnameImmutable bool - - // The AWS partition the endpoint belongs to. - PartitionID string - - // The service name that should be used for signing the requests to the - // endpoint. - SigningName string - - // The region that should be used for signing the request to the endpoint. - SigningRegion string - - // The signing method that should be used for signing the requests to the - // endpoint. - SigningMethod string - - // The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata. - // When providing a custom endpoint, you should set the source as EndpointSourceCustom. - // If source is not provided when providing a custom endpoint, the SDK may not - // perform required host mutations correctly. Source should be used along with - // HostnameImmutable property as per the usage requirement. - Source EndpointSource -} - -// EndpointSource is the endpoint source type. -// -// Deprecated: The global [Endpoint] structure is deprecated. -type EndpointSource int - -const ( - // EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source. - EndpointSourceServiceMetadata EndpointSource = iota - - // EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when - // user provides a custom endpoint to be used by the SDK. - EndpointSourceCustom -) - -// EndpointNotFoundError is a sentinel error to indicate that the -// EndpointResolver implementation was unable to resolve an endpoint for the -// given service and region. Resolvers should use this to indicate that an API -// client should fallback and attempt to use it's internal default resolver to -// resolve the endpoint. -type EndpointNotFoundError struct { - Err error -} - -// Error is the error message. -func (e *EndpointNotFoundError) Error() string { - return fmt.Sprintf("endpoint not found, %v", e.Err) -} - -// Unwrap returns the underlying error. -func (e *EndpointNotFoundError) Unwrap() error { - return e.Err -} - -// EndpointResolver is an endpoint resolver that can be used to provide or -// override an endpoint for the given service and region. API clients will -// attempt to use the EndpointResolver first to resolve an endpoint if -// available. If the EndpointResolver returns an EndpointNotFoundError error, -// API clients will fallback to attempting to resolve the endpoint using its -// internal default endpoint resolver. -// -// Deprecated: The global endpoint resolution interface is deprecated. The API -// for endpoint resolution is now unique to each service and is set via the -// EndpointResolverV2 field on service client options. Setting a value for -// EndpointResolver on aws.Config or service client options will prevent you -// from using any endpoint-related service features released after the -// introduction of EndpointResolverV2. You may also encounter broken or -// unexpected behavior when using the old global interface with services that -// use many endpoint-related customizations such as S3. -type EndpointResolver interface { - ResolveEndpoint(service, region string) (Endpoint, error) -} - -// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [EndpointResolver]. -type EndpointResolverFunc func(service, region string) (Endpoint, error) - -// ResolveEndpoint calls the wrapped function and returns the results. -func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { - return e(service, region) -} - -// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or -// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will -// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if -// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error, -// API clients will fallback to attempting to resolve the endpoint using its -// internal default endpoint resolver. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [EndpointResolver]. -type EndpointResolverWithOptions interface { - ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) -} - -// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [EndpointResolver]. -type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) - -// ResolveEndpoint calls the wrapped function and returns the results. -func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) { - return e(service, region, options...) -} - -// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value. -// Returns boolean false if the provided options does not have a method to retrieve the DisableHTTPS. -func GetDisableHTTPS(options ...interface{}) (value bool, found bool) { - type iface interface { - GetDisableHTTPS() bool - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetDisableHTTPS() - found = true - break - } - } - return value, found -} - -// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value. -// Returns boolean false if the provided options does not have a method to retrieve the ResolvedRegion. -func GetResolvedRegion(options ...interface{}) (value string, found bool) { - type iface interface { - GetResolvedRegion() string - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetResolvedRegion() - found = true - break - } - } - return value, found -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go deleted file mode 100644 index f390a08f9ffa..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package aws - -// MissingRegionError is an error that is returned if region configuration -// value was not found. -type MissingRegionError struct{} - -func (*MissingRegionError) Error() string { - return "an AWS region is required, but was not found" -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go deleted file mode 100644 index 2394418e9bd5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go +++ /dev/null @@ -1,365 +0,0 @@ -// Code generated by aws/generate.go DO NOT EDIT. - -package aws - -import ( - "github.com/aws/smithy-go/ptr" - "time" -) - -// ToBool returns bool value dereferenced if the passed -// in pointer was not nil. Returns a bool zero value if the -// pointer was nil. -func ToBool(p *bool) (v bool) { - return ptr.ToBool(p) -} - -// ToBoolSlice returns a slice of bool values, that are -// dereferenced if the passed in pointer was not nil. Returns a bool -// zero value if the pointer was nil. -func ToBoolSlice(vs []*bool) []bool { - return ptr.ToBoolSlice(vs) -} - -// ToBoolMap returns a map of bool values, that are -// dereferenced if the passed in pointer was not nil. The bool -// zero value is used if the pointer was nil. -func ToBoolMap(vs map[string]*bool) map[string]bool { - return ptr.ToBoolMap(vs) -} - -// ToByte returns byte value dereferenced if the passed -// in pointer was not nil. Returns a byte zero value if the -// pointer was nil. -func ToByte(p *byte) (v byte) { - return ptr.ToByte(p) -} - -// ToByteSlice returns a slice of byte values, that are -// dereferenced if the passed in pointer was not nil. Returns a byte -// zero value if the pointer was nil. -func ToByteSlice(vs []*byte) []byte { - return ptr.ToByteSlice(vs) -} - -// ToByteMap returns a map of byte values, that are -// dereferenced if the passed in pointer was not nil. The byte -// zero value is used if the pointer was nil. -func ToByteMap(vs map[string]*byte) map[string]byte { - return ptr.ToByteMap(vs) -} - -// ToString returns string value dereferenced if the passed -// in pointer was not nil. Returns a string zero value if the -// pointer was nil. -func ToString(p *string) (v string) { - return ptr.ToString(p) -} - -// ToStringSlice returns a slice of string values, that are -// dereferenced if the passed in pointer was not nil. Returns a string -// zero value if the pointer was nil. -func ToStringSlice(vs []*string) []string { - return ptr.ToStringSlice(vs) -} - -// ToStringMap returns a map of string values, that are -// dereferenced if the passed in pointer was not nil. The string -// zero value is used if the pointer was nil. -func ToStringMap(vs map[string]*string) map[string]string { - return ptr.ToStringMap(vs) -} - -// ToInt returns int value dereferenced if the passed -// in pointer was not nil. Returns a int zero value if the -// pointer was nil. -func ToInt(p *int) (v int) { - return ptr.ToInt(p) -} - -// ToIntSlice returns a slice of int values, that are -// dereferenced if the passed in pointer was not nil. Returns a int -// zero value if the pointer was nil. -func ToIntSlice(vs []*int) []int { - return ptr.ToIntSlice(vs) -} - -// ToIntMap returns a map of int values, that are -// dereferenced if the passed in pointer was not nil. The int -// zero value is used if the pointer was nil. -func ToIntMap(vs map[string]*int) map[string]int { - return ptr.ToIntMap(vs) -} - -// ToInt8 returns int8 value dereferenced if the passed -// in pointer was not nil. Returns a int8 zero value if the -// pointer was nil. -func ToInt8(p *int8) (v int8) { - return ptr.ToInt8(p) -} - -// ToInt8Slice returns a slice of int8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int8 -// zero value if the pointer was nil. -func ToInt8Slice(vs []*int8) []int8 { - return ptr.ToInt8Slice(vs) -} - -// ToInt8Map returns a map of int8 values, that are -// dereferenced if the passed in pointer was not nil. The int8 -// zero value is used if the pointer was nil. -func ToInt8Map(vs map[string]*int8) map[string]int8 { - return ptr.ToInt8Map(vs) -} - -// ToInt16 returns int16 value dereferenced if the passed -// in pointer was not nil. Returns a int16 zero value if the -// pointer was nil. -func ToInt16(p *int16) (v int16) { - return ptr.ToInt16(p) -} - -// ToInt16Slice returns a slice of int16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int16 -// zero value if the pointer was nil. -func ToInt16Slice(vs []*int16) []int16 { - return ptr.ToInt16Slice(vs) -} - -// ToInt16Map returns a map of int16 values, that are -// dereferenced if the passed in pointer was not nil. The int16 -// zero value is used if the pointer was nil. -func ToInt16Map(vs map[string]*int16) map[string]int16 { - return ptr.ToInt16Map(vs) -} - -// ToInt32 returns int32 value dereferenced if the passed -// in pointer was not nil. Returns a int32 zero value if the -// pointer was nil. -func ToInt32(p *int32) (v int32) { - return ptr.ToInt32(p) -} - -// ToInt32Slice returns a slice of int32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int32 -// zero value if the pointer was nil. -func ToInt32Slice(vs []*int32) []int32 { - return ptr.ToInt32Slice(vs) -} - -// ToInt32Map returns a map of int32 values, that are -// dereferenced if the passed in pointer was not nil. The int32 -// zero value is used if the pointer was nil. -func ToInt32Map(vs map[string]*int32) map[string]int32 { - return ptr.ToInt32Map(vs) -} - -// ToInt64 returns int64 value dereferenced if the passed -// in pointer was not nil. Returns a int64 zero value if the -// pointer was nil. -func ToInt64(p *int64) (v int64) { - return ptr.ToInt64(p) -} - -// ToInt64Slice returns a slice of int64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int64 -// zero value if the pointer was nil. -func ToInt64Slice(vs []*int64) []int64 { - return ptr.ToInt64Slice(vs) -} - -// ToInt64Map returns a map of int64 values, that are -// dereferenced if the passed in pointer was not nil. The int64 -// zero value is used if the pointer was nil. -func ToInt64Map(vs map[string]*int64) map[string]int64 { - return ptr.ToInt64Map(vs) -} - -// ToUint returns uint value dereferenced if the passed -// in pointer was not nil. Returns a uint zero value if the -// pointer was nil. -func ToUint(p *uint) (v uint) { - return ptr.ToUint(p) -} - -// ToUintSlice returns a slice of uint values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint -// zero value if the pointer was nil. -func ToUintSlice(vs []*uint) []uint { - return ptr.ToUintSlice(vs) -} - -// ToUintMap returns a map of uint values, that are -// dereferenced if the passed in pointer was not nil. The uint -// zero value is used if the pointer was nil. -func ToUintMap(vs map[string]*uint) map[string]uint { - return ptr.ToUintMap(vs) -} - -// ToUint8 returns uint8 value dereferenced if the passed -// in pointer was not nil. Returns a uint8 zero value if the -// pointer was nil. -func ToUint8(p *uint8) (v uint8) { - return ptr.ToUint8(p) -} - -// ToUint8Slice returns a slice of uint8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint8 -// zero value if the pointer was nil. -func ToUint8Slice(vs []*uint8) []uint8 { - return ptr.ToUint8Slice(vs) -} - -// ToUint8Map returns a map of uint8 values, that are -// dereferenced if the passed in pointer was not nil. The uint8 -// zero value is used if the pointer was nil. -func ToUint8Map(vs map[string]*uint8) map[string]uint8 { - return ptr.ToUint8Map(vs) -} - -// ToUint16 returns uint16 value dereferenced if the passed -// in pointer was not nil. Returns a uint16 zero value if the -// pointer was nil. -func ToUint16(p *uint16) (v uint16) { - return ptr.ToUint16(p) -} - -// ToUint16Slice returns a slice of uint16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint16 -// zero value if the pointer was nil. -func ToUint16Slice(vs []*uint16) []uint16 { - return ptr.ToUint16Slice(vs) -} - -// ToUint16Map returns a map of uint16 values, that are -// dereferenced if the passed in pointer was not nil. The uint16 -// zero value is used if the pointer was nil. -func ToUint16Map(vs map[string]*uint16) map[string]uint16 { - return ptr.ToUint16Map(vs) -} - -// ToUint32 returns uint32 value dereferenced if the passed -// in pointer was not nil. Returns a uint32 zero value if the -// pointer was nil. -func ToUint32(p *uint32) (v uint32) { - return ptr.ToUint32(p) -} - -// ToUint32Slice returns a slice of uint32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint32 -// zero value if the pointer was nil. -func ToUint32Slice(vs []*uint32) []uint32 { - return ptr.ToUint32Slice(vs) -} - -// ToUint32Map returns a map of uint32 values, that are -// dereferenced if the passed in pointer was not nil. The uint32 -// zero value is used if the pointer was nil. -func ToUint32Map(vs map[string]*uint32) map[string]uint32 { - return ptr.ToUint32Map(vs) -} - -// ToUint64 returns uint64 value dereferenced if the passed -// in pointer was not nil. Returns a uint64 zero value if the -// pointer was nil. -func ToUint64(p *uint64) (v uint64) { - return ptr.ToUint64(p) -} - -// ToUint64Slice returns a slice of uint64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint64 -// zero value if the pointer was nil. -func ToUint64Slice(vs []*uint64) []uint64 { - return ptr.ToUint64Slice(vs) -} - -// ToUint64Map returns a map of uint64 values, that are -// dereferenced if the passed in pointer was not nil. The uint64 -// zero value is used if the pointer was nil. -func ToUint64Map(vs map[string]*uint64) map[string]uint64 { - return ptr.ToUint64Map(vs) -} - -// ToFloat32 returns float32 value dereferenced if the passed -// in pointer was not nil. Returns a float32 zero value if the -// pointer was nil. -func ToFloat32(p *float32) (v float32) { - return ptr.ToFloat32(p) -} - -// ToFloat32Slice returns a slice of float32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float32 -// zero value if the pointer was nil. -func ToFloat32Slice(vs []*float32) []float32 { - return ptr.ToFloat32Slice(vs) -} - -// ToFloat32Map returns a map of float32 values, that are -// dereferenced if the passed in pointer was not nil. The float32 -// zero value is used if the pointer was nil. -func ToFloat32Map(vs map[string]*float32) map[string]float32 { - return ptr.ToFloat32Map(vs) -} - -// ToFloat64 returns float64 value dereferenced if the passed -// in pointer was not nil. Returns a float64 zero value if the -// pointer was nil. -func ToFloat64(p *float64) (v float64) { - return ptr.ToFloat64(p) -} - -// ToFloat64Slice returns a slice of float64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float64 -// zero value if the pointer was nil. -func ToFloat64Slice(vs []*float64) []float64 { - return ptr.ToFloat64Slice(vs) -} - -// ToFloat64Map returns a map of float64 values, that are -// dereferenced if the passed in pointer was not nil. The float64 -// zero value is used if the pointer was nil. -func ToFloat64Map(vs map[string]*float64) map[string]float64 { - return ptr.ToFloat64Map(vs) -} - -// ToTime returns time.Time value dereferenced if the passed -// in pointer was not nil. Returns a time.Time zero value if the -// pointer was nil. -func ToTime(p *time.Time) (v time.Time) { - return ptr.ToTime(p) -} - -// ToTimeSlice returns a slice of time.Time values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Time -// zero value if the pointer was nil. -func ToTimeSlice(vs []*time.Time) []time.Time { - return ptr.ToTimeSlice(vs) -} - -// ToTimeMap returns a map of time.Time values, that are -// dereferenced if the passed in pointer was not nil. The time.Time -// zero value is used if the pointer was nil. -func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { - return ptr.ToTimeMap(vs) -} - -// ToDuration returns time.Duration value dereferenced if the passed -// in pointer was not nil. Returns a time.Duration zero value if the -// pointer was nil. -func ToDuration(p *time.Duration) (v time.Duration) { - return ptr.ToDuration(p) -} - -// ToDurationSlice returns a slice of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Duration -// zero value if the pointer was nil. -func ToDurationSlice(vs []*time.Duration) []time.Duration { - return ptr.ToDurationSlice(vs) -} - -// ToDurationMap returns a map of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. The time.Duration -// zero value is used if the pointer was nil. -func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { - return ptr.ToDurationMap(vs) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go deleted file mode 100644 index 89449f67b266..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package aws - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go deleted file mode 100644 index 91c94d987b10..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by aws/logging_generate.go DO NOT EDIT. - -package aws - -// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where -// each bit is a flag that describes the logging behavior for one or more client components. -// The entire 64-bit group is reserved for later expansion by the SDK. -// -// Example: Setting ClientLogMode to enable logging of retries and requests -// -// clientLogMode := aws.LogRetries | aws.LogRequest -// -// Example: Adding an additional log mode to an existing ClientLogMode value -// -// clientLogMode |= aws.LogResponse -type ClientLogMode uint64 - -// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. -const ( - LogSigning ClientLogMode = 1 << (64 - 1 - iota) - LogRetries - LogRequest - LogRequestWithBody - LogResponse - LogResponseWithBody - LogDeprecatedUsage - LogRequestEventMessage - LogResponseEventMessage -) - -// IsSigning returns whether the Signing logging mode bit is set -func (m ClientLogMode) IsSigning() bool { - return m&LogSigning != 0 -} - -// IsRetries returns whether the Retries logging mode bit is set -func (m ClientLogMode) IsRetries() bool { - return m&LogRetries != 0 -} - -// IsRequest returns whether the Request logging mode bit is set -func (m ClientLogMode) IsRequest() bool { - return m&LogRequest != 0 -} - -// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set -func (m ClientLogMode) IsRequestWithBody() bool { - return m&LogRequestWithBody != 0 -} - -// IsResponse returns whether the Response logging mode bit is set -func (m ClientLogMode) IsResponse() bool { - return m&LogResponse != 0 -} - -// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set -func (m ClientLogMode) IsResponseWithBody() bool { - return m&LogResponseWithBody != 0 -} - -// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set -func (m ClientLogMode) IsDeprecatedUsage() bool { - return m&LogDeprecatedUsage != 0 -} - -// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set -func (m ClientLogMode) IsRequestEventMessage() bool { - return m&LogRequestEventMessage != 0 -} - -// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set -func (m ClientLogMode) IsResponseEventMessage() bool { - return m&LogResponseEventMessage != 0 -} - -// ClearSigning clears the Signing logging mode bit -func (m *ClientLogMode) ClearSigning() { - *m &^= LogSigning -} - -// ClearRetries clears the Retries logging mode bit -func (m *ClientLogMode) ClearRetries() { - *m &^= LogRetries -} - -// ClearRequest clears the Request logging mode bit -func (m *ClientLogMode) ClearRequest() { - *m &^= LogRequest -} - -// ClearRequestWithBody clears the RequestWithBody logging mode bit -func (m *ClientLogMode) ClearRequestWithBody() { - *m &^= LogRequestWithBody -} - -// ClearResponse clears the Response logging mode bit -func (m *ClientLogMode) ClearResponse() { - *m &^= LogResponse -} - -// ClearResponseWithBody clears the ResponseWithBody logging mode bit -func (m *ClientLogMode) ClearResponseWithBody() { - *m &^= LogResponseWithBody -} - -// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit -func (m *ClientLogMode) ClearDeprecatedUsage() { - *m &^= LogDeprecatedUsage -} - -// ClearRequestEventMessage clears the RequestEventMessage logging mode bit -func (m *ClientLogMode) ClearRequestEventMessage() { - *m &^= LogRequestEventMessage -} - -// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit -func (m *ClientLogMode) ClearResponseEventMessage() { - *m &^= LogResponseEventMessage -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go deleted file mode 100644 index 6ecc2231a122..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build clientlogmode -// +build clientlogmode - -package main - -import ( - "fmt" - "log" - "os" - "strings" - "text/template" -) - -var config = struct { - ModeBits []string -}{ - // Items should be appended only to keep bit-flag positions stable - ModeBits: []string{ - "Signing", - "Retries", - "Request", - "RequestWithBody", - "Response", - "ResponseWithBody", - "DeprecatedUsage", - "RequestEventMessage", - "ResponseEventMessage", - }, -} - -func bitName(name string) string { - return strings.ToUpper(name[:1]) + name[1:] -} - -var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ - "symbolName": func(name string) string { - return "Log" + bitName(name) - }, - "bitName": bitName, -}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. - -package aws - -// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where -// each bit is a flag that describes the logging behavior for one or more client components. -// The entire 64-bit group is reserved for later expansion by the SDK. -// -// Example: Setting ClientLogMode to enable logging of retries and requests -// clientLogMode := aws.LogRetries | aws.LogRequest -// -// Example: Adding an additional log mode to an existing ClientLogMode value -// clientLogMode |= aws.LogResponse -type ClientLogMode uint64 - -// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. -const ( -{{- range $index, $field := .ModeBits }} - {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }} -{{- end }} -) -{{ range $_, $field := .ModeBits }} -// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set -func (m ClientLogMode) Is{{- bitName $field }}() bool { - return m&{{- (symbolName $field) }} != 0 -} -{{ end }} -{{- range $_, $field := .ModeBits }} -// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit -func (m *ClientLogMode) Clear{{- bitName $field }}() { - *m &^= {{ (symbolName $field) }} -} -{{ end -}} -`)) - -func main() { - uniqueBitFields := make(map[string]struct{}) - - for _, bitName := range config.ModeBits { - if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok { - panic(fmt.Sprintf("duplicate bit field: %s", bitName)) - } - uniqueBitFields[bitName] = struct{}{} - } - - file, err := os.Create("logging.go") - if err != nil { - log.Fatal(err) - } - defer file.Close() - - err = tmpl.Execute(file, config) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go deleted file mode 100644 index d66f0960aa50..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go +++ /dev/null @@ -1,213 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - - "github.com/aws/smithy-go/middleware" -) - -// RegisterServiceMetadata registers metadata about the service and operation into the middleware context -// so that it is available at runtime for other middleware to introspect. -type RegisterServiceMetadata struct { - ServiceID string - SigningName string - Region string - OperationName string -} - -// ID returns the middleware identifier. -func (s *RegisterServiceMetadata) ID() string { - return "RegisterServiceMetadata" -} - -// HandleInitialize registers service metadata information into the middleware context, allowing for introspection. -func (s RegisterServiceMetadata) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) { - if len(s.ServiceID) > 0 { - ctx = SetServiceID(ctx, s.ServiceID) - } - if len(s.SigningName) > 0 { - ctx = SetSigningName(ctx, s.SigningName) - } - if len(s.Region) > 0 { - ctx = setRegion(ctx, s.Region) - } - if len(s.OperationName) > 0 { - ctx = setOperationName(ctx, s.OperationName) - } - return next.HandleInitialize(ctx, in) -} - -// service metadata keys for storing and lookup of runtime stack information. -type ( - serviceIDKey struct{} - signingNameKey struct{} - signingRegionKey struct{} - regionKey struct{} - operationNameKey struct{} - partitionIDKey struct{} - requiresLegacyEndpointsKey struct{} -) - -// GetServiceID retrieves the service id from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetServiceID(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string) - return v -} - -// GetSigningName retrieves the service signing name from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. The resolved signing name is available -// in the signer properties object passed to the signer. -func GetSigningName(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string) - return v -} - -// GetSigningRegion retrieves the region from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. The resolved signing region is available -// in the signer properties object passed to the signer. -func GetSigningRegion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string) - return v -} - -// GetRegion retrieves the endpoint region from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetRegion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, regionKey{}).(string) - return v -} - -// GetOperationName retrieves the service operation metadata from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetOperationName(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string) - return v -} - -// GetPartitionID retrieves the endpoint partition id from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetPartitionID(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string) - return v -} - -// GetRequiresLegacyEndpoints the flag used to indicate if legacy endpoint -// customizations need to be executed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetRequiresLegacyEndpoints(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool) - return v -} - -// SetRequiresLegacyEndpoints set or modifies the flag indicated that -// legacy endpoint customizations are needed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value) -} - -// SetSigningName set or modifies the sigv4 or sigv4a signing name on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. Use WithSigV4SigningName client option -// funcs instead. -func SetSigningName(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, signingNameKey{}, value) -} - -// SetSigningRegion sets or modifies the region on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option -// funcs instead. -func SetSigningRegion(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, signingRegionKey{}, value) -} - -// SetServiceID sets the service id on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetServiceID(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, serviceIDKey{}, value) -} - -// setRegion sets the endpoint region on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setRegion(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, regionKey{}, value) -} - -// setOperationName sets the service operation on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setOperationName(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, operationNameKey{}, value) -} - -// SetPartitionID sets the partition id of a resolved region on the context -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetPartitionID(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, partitionIDKey{}, value) -} - -// EndpointSource key -type endpointSourceKey struct{} - -// GetEndpointSource returns an endpoint source if set on context -func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) { - v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource) - return v -} - -// SetEndpointSource sets endpoint source on context -func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context { - return middleware.WithStackValue(ctx, endpointSourceKey{}, value) -} - -type signingCredentialsKey struct{} - -// GetSigningCredentials returns the credentials that were used for signing if set on context. -func GetSigningCredentials(ctx context.Context) (v aws.Credentials) { - v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials) - return v -} - -// SetSigningCredentials sets the credentails used for signing on the context. -func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context { - return middleware.WithStackValue(ctx, signingCredentialsKey{}, value) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go deleted file mode 100644 index 6d5f0079c2f1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go +++ /dev/null @@ -1,168 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyrand "github.com/aws/smithy-go/rand" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation -// invocation. -type ClientRequestID struct{} - -// ID the identifier for the ClientRequestID -func (r *ClientRequestID) ID() string { - return "ClientRequestID" -} - -// HandleBuild attaches a unique operation invocation id for the operation to the request -func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID() - if err != nil { - return out, metadata, err - } - - const invocationIDHeader = "Amz-Sdk-Invocation-Id" - req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID) - - return next.HandleBuild(ctx, in) -} - -// RecordResponseTiming records the response timing for the SDK client requests. -type RecordResponseTiming struct{} - -// ID is the middleware identifier -func (a *RecordResponseTiming) ID() string { - return "RecordResponseTiming" -} - -// HandleDeserialize calculates response metadata and clock skew -func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - responseAt := sdk.NowTime() - setResponseAt(&metadata, responseAt) - - var serverTime time.Time - - switch resp := out.RawResponse.(type) { - case *smithyhttp.Response: - respDateHeader := resp.Header.Get("Date") - if len(respDateHeader) == 0 { - break - } - var parseErr error - serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) - if parseErr != nil { - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", - parseErr.Error()) - break - } - setServerTime(&metadata, serverTime) - } - - if !serverTime.IsZero() { - attemptSkew := serverTime.Sub(responseAt) - setAttemptSkew(&metadata, attemptSkew) - } - - return out, metadata, err -} - -type responseAtKey struct{} - -// GetResponseAt returns the time response was received at. -func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { - v, ok = metadata.Get(responseAtKey{}).(time.Time) - return v, ok -} - -// setResponseAt sets the response time on the metadata. -func setResponseAt(metadata *middleware.Metadata, v time.Time) { - metadata.Set(responseAtKey{}, v) -} - -type serverTimeKey struct{} - -// GetServerTime returns the server time for response. -func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) { - v, ok = metadata.Get(serverTimeKey{}).(time.Time) - return v, ok -} - -// setServerTime sets the server time on the metadata. -func setServerTime(metadata *middleware.Metadata, v time.Time) { - metadata.Set(serverTimeKey{}, v) -} - -type attemptSkewKey struct{} - -// GetAttemptSkew returns Attempt clock skew for response from metadata. -func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) { - v, ok = metadata.Get(attemptSkewKey{}).(time.Duration) - return v, ok -} - -// setAttemptSkew sets the attempt clock skew on the metadata. -func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) { - metadata.Set(attemptSkewKey{}, v) -} - -// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack -func AddClientRequestIDMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&ClientRequestID{}, middleware.After) -} - -// AddRecordResponseTiming adds RecordResponseTiming middleware to the -// middleware stack. -func AddRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After) -} - -// rawResponseKey is the accessor key used to store and access the -// raw response within the response metadata. -type rawResponseKey struct{} - -// AddRawResponse middleware adds raw response on to the metadata -type AddRawResponse struct{} - -// ID the identifier for the ClientRequestID -func (m *AddRawResponse) ID() string { - return "AddRawResponseToMetadata" -} - -// HandleDeserialize adds raw response on the middleware metadata -func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - metadata.Set(rawResponseKey{}, out.RawResponse) - return out, metadata, err -} - -// AddRawResponseToMetadata adds middleware to the middleware stack that -// store raw response on to the metadata. -func AddRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before) -} - -// GetRawResponse returns raw response set on metadata -func GetRawResponse(metadata middleware.Metadata) interface{} { - return metadata.Get(rawResponseKey{}) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go deleted file mode 100644 index ba262dadcd0d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package middleware - -import "runtime" - -func getNormalizedOSName() (os string) { - switch runtime.GOOS { - case "android": - os = "android" - case "linux": - os = "linux" - case "windows": - os = "windows" - case "darwin": - os = "macos" - case "ios": - os = "ios" - default: - os = "other" - } - return os -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go deleted file mode 100644 index e14a1e4ecb9d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !go1.16 -// +build !go1.16 - -package middleware - -import "runtime" - -func getNormalizedOSName() (os string) { - switch runtime.GOOS { - case "android": - os = "android" - case "linux": - os = "linux" - case "windows": - os = "windows" - case "darwin": - // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64 - // For now declare this as "other" until we have a better detection mechanism. - fallthrough - default: - os = "other" - } - return os -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go deleted file mode 100644 index 3f6aaf231e19..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go +++ /dev/null @@ -1,94 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "os" -) - -const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" -const envAmznTraceID = "_X_AMZN_TRACE_ID" -const amznTraceIDHeader = "X-Amzn-Trace-Id" - -// AddRecursionDetection adds recursionDetection to the middleware stack -func AddRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&RecursionDetection{}, middleware.After) -} - -// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent -// to avoid recursion invocation in Lambda -type RecursionDetection struct{} - -// ID returns the middleware identifier -func (m *RecursionDetection) ID() string { - return "RecursionDetection" -} - -// HandleBuild detects Lambda environment and adds its trace ID to request header if absent -func (m *RecursionDetection) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) - xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) - value := req.Header.Get(amznTraceIDHeader) - // only set the X-Amzn-Trace-Id header when it is not set initially, the - // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists - if value != "" || !hasLambdaEnv || !hasTraceID { - return next.HandleBuild(ctx, in) - } - - req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) - return next.HandleBuild(ctx, in) -} - -func percentEncode(s string) string { - upperhex := "0123456789ABCDEF" - hexCount := 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEncode(c) { - hexCount++ - } - } - - if hexCount == 0 { - return s - } - - required := len(s) + 2*hexCount - t := make([]byte, required) - j := 0 - for i := 0; i < len(s); i++ { - if c := s[i]; shouldEncode(c) { - t[j] = '%' - t[j+1] = upperhex[c>>4] - t[j+2] = upperhex[c&15] - j += 3 - } else { - t[j] = c - j++ - } - } - return string(t) -} - -func shouldEncode(c byte) bool { - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - return false - } - switch c { - case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': - return false - default: - return true - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go deleted file mode 100644 index dd3391fe41e3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go +++ /dev/null @@ -1,27 +0,0 @@ -package middleware - -import ( - "github.com/aws/smithy-go/middleware" -) - -// requestIDKey is used to retrieve request id from response metadata -type requestIDKey struct{} - -// SetRequestIDMetadata sets the provided request id over middleware metadata -func SetRequestIDMetadata(metadata *middleware.Metadata, id string) { - metadata.Set(requestIDKey{}, id) -} - -// GetRequestIDMetadata retrieves the request id from middleware metadata -// returns string and bool indicating value of request id, whether request id was set. -func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) { - if !metadata.Has(requestIDKey{}) { - return "", false - } - - v, ok := metadata.Get(requestIDKey{}).(string) - if !ok { - return "", true - } - return v, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go deleted file mode 100644 index 128b60a73103..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go +++ /dev/null @@ -1,57 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddRequestIDRetrieverMiddleware adds request id retriever middleware -func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - // add error wrapper middleware before operation deserializers so that it can wrap the error response - // returned by operation deserializers - return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before) -} - -// RequestIDRetriever middleware captures the AWS service request ID from the -// raw response. -type RequestIDRetriever struct { -} - -// ID returns the middleware identifier -func (m *RequestIDRetriever) ID() string { - return "RequestIDRetriever" -} - -// HandleDeserialize pulls the AWS request ID from the response, storing it in -// operation metadata. -func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. - return out, metadata, err - } - - // Different header which can map to request id - requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"} - - for _, h := range requestIDHeaderList { - // check for headers known to contain Request id - if v := resp.Header.Get(h); len(v) != 0 { - // set reqID on metadata for successful responses. - SetRequestIDMetadata(&metadata, v) - - span, _ := tracing.GetSpan(ctx) - span.SetProperty("aws.request_id", v) - break - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go deleted file mode 100644 index 6ee3391be273..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ /dev/null @@ -1,391 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "os" - "runtime" - "sort" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -var languageVersion = strings.TrimPrefix(runtime.Version(), "go") - -// SDKAgentKeyType is the metadata type to add to the SDK agent string -type SDKAgentKeyType int - -// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will -// be mapped to AdditionalMetadata. -const ( - _ SDKAgentKeyType = iota - APIMetadata - OperatingSystemMetadata - LanguageMetadata - EnvironmentMetadata - FeatureMetadata - ConfigMetadata - FrameworkMetadata - AdditionalMetadata - ApplicationIdentifier - FeatureMetadata2 -) - -// Hardcoded value to specify which version of the user agent we're using -const uaMetadata = "ua/2.1" - -func (k SDKAgentKeyType) string() string { - switch k { - case APIMetadata: - return "api" - case OperatingSystemMetadata: - return "os" - case LanguageMetadata: - return "lang" - case EnvironmentMetadata: - return "exec-env" - case FeatureMetadata: - return "ft" - case ConfigMetadata: - return "cfg" - case FrameworkMetadata: - return "lib" - case ApplicationIdentifier: - return "app" - case FeatureMetadata2: - return "m" - case AdditionalMetadata: - fallthrough - default: - return "md" - } -} - -const execEnvVar = `AWS_EXECUTION_ENV` - -var validChars = map[rune]bool{ - '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true, - '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, -} - -// UserAgentFeature enumerates tracked SDK features. -type UserAgentFeature string - -// Enumerates UserAgentFeature. -const ( - UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) - - UserAgentFeatureWaiter = "B" - UserAgentFeaturePaginator = "C" - - UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) - UserAgentFeatureRetryModeStandard = "E" - UserAgentFeatureRetryModeAdaptive = "F" - - UserAgentFeatureS3Transfer = "G" - UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) - UserAgentFeatureS3CryptoV2 = "I" // n/a - UserAgentFeatureS3ExpressBucket = "J" - UserAgentFeatureS3AccessGrants = "K" // not yet implemented - - UserAgentFeatureGZIPRequestCompression = "L" - - UserAgentFeatureProtocolRPCV2CBOR = "M" - - UserAgentFeatureAccountIDEndpoint = "O" // DO NOT IMPLEMENT: rules output is not currently defined. SDKs should not parse endpoints for feature information. - UserAgentFeatureAccountIDModePreferred = "P" - UserAgentFeatureAccountIDModeDisabled = "Q" - UserAgentFeatureAccountIDModeRequired = "R" - - UserAgentFeatureRequestChecksumCRC32 = "U" - UserAgentFeatureRequestChecksumCRC32C = "V" - UserAgentFeatureRequestChecksumCRC64 = "W" - UserAgentFeatureRequestChecksumSHA1 = "X" - UserAgentFeatureRequestChecksumSHA256 = "Y" - UserAgentFeatureRequestChecksumWhenSupported = "Z" - UserAgentFeatureRequestChecksumWhenRequired = "a" - UserAgentFeatureResponseChecksumWhenSupported = "b" - UserAgentFeatureResponseChecksumWhenRequired = "c" - - UserAgentFeatureDynamoDBUserAgent = "d" // not yet implemented - - UserAgentFeatureCredentialsCode = "e" - UserAgentFeatureCredentialsJvmSystemProperties = "f" // n/a (this is not a JVM sdk) - UserAgentFeatureCredentialsEnvVars = "g" - UserAgentFeatureCredentialsEnvVarsStsWebIDToken = "h" - UserAgentFeatureCredentialsStsAssumeRole = "i" - UserAgentFeatureCredentialsStsAssumeRoleSaml = "j" // not yet implemented - UserAgentFeatureCredentialsStsAssumeRoleWebID = "k" - UserAgentFeatureCredentialsStsFederationToken = "l" // not yet implemented - UserAgentFeatureCredentialsStsSessionToken = "m" // not yet implemented - UserAgentFeatureCredentialsProfile = "n" - UserAgentFeatureCredentialsProfileSourceProfile = "o" - UserAgentFeatureCredentialsProfileNamedProvider = "p" - UserAgentFeatureCredentialsProfileStsWebIDToken = "q" - UserAgentFeatureCredentialsProfileSso = "r" - UserAgentFeatureCredentialsSso = "s" - UserAgentFeatureCredentialsProfileSsoLegacy = "t" - UserAgentFeatureCredentialsSsoLegacy = "u" - UserAgentFeatureCredentialsProfileProcess = "v" - UserAgentFeatureCredentialsProcess = "w" - UserAgentFeatureCredentialsBoto2ConfigFile = "x" // n/a (this is not boto/Python) - UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk) - UserAgentFeatureCredentialsHTTP = "z" - UserAgentFeatureCredentialsIMDS = "0" -) - -var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{ - aws.CredentialSourceCode: UserAgentFeatureCredentialsCode, - aws.CredentialSourceEnvVars: UserAgentFeatureCredentialsEnvVars, - aws.CredentialSourceEnvVarsSTSWebIDToken: UserAgentFeatureCredentialsEnvVarsStsWebIDToken, - aws.CredentialSourceSTSAssumeRole: UserAgentFeatureCredentialsStsAssumeRole, - aws.CredentialSourceSTSAssumeRoleSaml: UserAgentFeatureCredentialsStsAssumeRoleSaml, - aws.CredentialSourceSTSAssumeRoleWebID: UserAgentFeatureCredentialsStsAssumeRoleWebID, - aws.CredentialSourceSTSFederationToken: UserAgentFeatureCredentialsStsFederationToken, - aws.CredentialSourceSTSSessionToken: UserAgentFeatureCredentialsStsSessionToken, - aws.CredentialSourceProfile: UserAgentFeatureCredentialsProfile, - aws.CredentialSourceProfileSourceProfile: UserAgentFeatureCredentialsProfileSourceProfile, - aws.CredentialSourceProfileNamedProvider: UserAgentFeatureCredentialsProfileNamedProvider, - aws.CredentialSourceProfileSTSWebIDToken: UserAgentFeatureCredentialsProfileStsWebIDToken, - aws.CredentialSourceProfileSSO: UserAgentFeatureCredentialsProfileSso, - aws.CredentialSourceSSO: UserAgentFeatureCredentialsSso, - aws.CredentialSourceProfileSSOLegacy: UserAgentFeatureCredentialsProfileSsoLegacy, - aws.CredentialSourceSSOLegacy: UserAgentFeatureCredentialsSsoLegacy, - aws.CredentialSourceProfileProcess: UserAgentFeatureCredentialsProfileProcess, - aws.CredentialSourceProcess: UserAgentFeatureCredentialsProcess, - aws.CredentialSourceHTTP: UserAgentFeatureCredentialsHTTP, - aws.CredentialSourceIMDS: UserAgentFeatureCredentialsIMDS, -} - -// RequestUserAgent is a build middleware that set the User-Agent for the request. -type RequestUserAgent struct { - sdkAgent, userAgent *smithyhttp.UserAgentBuilder - features map[UserAgentFeature]struct{} -} - -// NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the -// request. -// -// User-Agent example: -// -// aws-sdk-go-v2/1.2.3 -// -// X-Amz-User-Agent example: -// -// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 -func NewRequestUserAgent() *RequestUserAgent { - userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() - addProductName(userAgent) - addUserAgentMetadata(userAgent) - addProductName(sdkAgent) - - r := &RequestUserAgent{ - sdkAgent: sdkAgent, - userAgent: userAgent, - features: map[UserAgentFeature]struct{}{}, - } - - addSDKMetadata(r) - - return r -} - -func addSDKMetadata(r *RequestUserAgent) { - r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) - r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) - r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) - r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH) - if ev := os.Getenv(execEnvVar); len(ev) > 0 { - r.AddSDKAgentKey(EnvironmentMetadata, ev) - } -} - -func addProductName(builder *smithyhttp.UserAgentBuilder) { - builder.AddKeyValue(aws.SDKName, aws.SDKVersion) -} - -func addUserAgentMetadata(builder *smithyhttp.UserAgentBuilder) { - builder.AddKey(uaMetadata) -} - -// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. -func AddUserAgentKey(key string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddUserAgentKey(key) - return nil - } -} - -// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. -func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddUserAgentKeyValue(key, value) - return nil - } -} - -// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. -func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddSDKAgentKey(keyType, key) - return nil - } -} - -// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. -func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddSDKAgentKeyValue(keyType, key, value) - return nil - } -} - -// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present. -func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { - _, err := getOrAddRequestUserAgent(stack) - return err -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) { - id := (*RequestUserAgent)(nil).ID() - bm, ok := stack.Build.Get(id) - if !ok { - bm = NewRequestUserAgent() - err := stack.Build.Add(bm, middleware.After) - if err != nil { - return nil, err - } - } - - requestUserAgent, ok := bm.(*RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) - } - - return requestUserAgent, nil -} - -// AddUserAgentKey adds the component identified by name to the User-Agent string. -func (u *RequestUserAgent) AddUserAgentKey(key string) { - u.userAgent.AddKey(strings.Map(rules, key)) -} - -// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) { - u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value)) -} - -// AddUserAgentFeature adds the feature ID to the tracking list to be emitted -// in the final User-Agent string. -func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) { - u.features[feature] = struct{}{} -} - -// AddSDKAgentKey adds the component identified by name to the User-Agent string. -func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { - // TODO: should target sdkAgent - u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key)) -} - -// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { - // TODO: should target sdkAgent - u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value)) -} - -// AddCredentialsSource adds the credential source as a feature on the User-Agent string -func (u *RequestUserAgent) AddCredentialsSource(source aws.CredentialSource) { - x, ok := credentialSourceToFeature[source] - if ok { - u.AddUserAgentFeature(x) - } -} - -// ID the name of the middleware. -func (u *RequestUserAgent) ID() string { - return "UserAgent" -} - -// HandleBuild adds or appends the constructed user agent to the request. -func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - switch req := in.Request.(type) { - case *smithyhttp.Request: - u.addHTTPUserAgent(req) - // TODO: To be re-enabled - // u.addHTTPSDKAgent(req) - default: - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - return next.HandleBuild(ctx, in) -} - -func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { - const userAgent = "User-Agent" - if len(u.features) > 0 { - updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) - } - updateHTTPHeader(request, userAgent, u.userAgent.Build()) -} - -func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { - const sdkAgent = "X-Amz-User-Agent" - updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) -} - -func updateHTTPHeader(request *smithyhttp.Request, header string, value string) { - var current string - if v := request.Header[header]; len(v) > 0 { - current = v[0] - } - if len(current) > 0 { - current = value + " " + current - } else { - current = value - } - request.Header[header] = append(request.Header[header][:0], current) -} - -func rules(r rune) rune { - switch { - case r >= '0' && r <= '9': - return r - case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z': - return r - case validChars[r]: - return r - default: - return '-' - } -} - -func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string { - fs := make([]string, 0, len(features)) - for f := range features { - fs = append(fs, string(f)) - } - - sort.Strings(fs) - return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ",")) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md deleted file mode 100644 index 73d112597f02..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ /dev/null @@ -1,150 +0,0 @@ -# v1.7.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. - -# v1.6.11 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. - -# v1.6.10 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 - -# v1.6.9 (2025-02-14) - -* **Bug Fix**: Remove max limit on event stream messages - -# v1.6.8 (2025-01-24) - -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.6.7 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. - -# v1.6.6 (2024-10-04) - -* No change notes available for this release. - -# v1.6.5 (2024-09-20) - -* No change notes available for this release. - -# v1.6.4 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. - -# v1.6.3 (2024-06-28) - -* No change notes available for this release. - -# v1.6.2 (2024-03-29) - -* No change notes available for this release. - -# v1.6.1 (2024-02-21) - -* No change notes available for this release. - -# v1.6.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# v1.5.4 (2023-12-07) - -* No change notes available for this release. - -# v1.5.3 (2023-11-30) - -* No change notes available for this release. - -# v1.5.2 (2023-11-29) - -* No change notes available for this release. - -# v1.5.1 (2023-11-15) - -* No change notes available for this release. - -# v1.5.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). - -# v1.4.14 (2023-10-06) - -* No change notes available for this release. - -# v1.4.13 (2023-08-18) - -* No change notes available for this release. - -# v1.4.12 (2023-08-07) - -* No change notes available for this release. - -# v1.4.11 (2023-07-31) - -* No change notes available for this release. - -# v1.4.10 (2022-12-02) - -* No change notes available for this release. - -# v1.4.9 (2022-10-24) - -* No change notes available for this release. - -# v1.4.8 (2022-09-14) - -* No change notes available for this release. - -# v1.4.7 (2022-09-02) - -* No change notes available for this release. - -# v1.4.6 (2022-08-31) - -* No change notes available for this release. - -# v1.4.5 (2022-08-29) - -* No change notes available for this release. - -# v1.4.4 (2022-08-09) - -* No change notes available for this release. - -# v1.4.3 (2022-06-29) - -* No change notes available for this release. - -# v1.4.2 (2022-06-07) - -* No change notes available for this release. - -# v1.4.1 (2022-03-24) - -* No change notes available for this release. - -# v1.4.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.3.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.1.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.0.0 (2021-11-06) - -* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. -* **Release**: Protocol support has been added for AWS event stream. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go deleted file mode 100644 index 151054971a51..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go +++ /dev/null @@ -1,144 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "strconv" -) - -type decodedMessage struct { - rawMessage - Headers decodedHeaders `json:"headers"` -} -type jsonMessage struct { - Length json.Number `json:"total_length"` - HeadersLen json.Number `json:"headers_length"` - PreludeCRC json.Number `json:"prelude_crc"` - Headers decodedHeaders `json:"headers"` - Payload []byte `json:"payload"` - CRC json.Number `json:"message_crc"` -} - -func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { - var jsonMsg jsonMessage - if err = json.Unmarshal(b, &jsonMsg); err != nil { - return err - } - - d.Length, err = numAsUint32(jsonMsg.Length) - if err != nil { - return err - } - d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) - if err != nil { - return err - } - d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) - if err != nil { - return err - } - d.Headers = jsonMsg.Headers - d.Payload = jsonMsg.Payload - d.CRC, err = numAsUint32(jsonMsg.CRC) - if err != nil { - return err - } - - return nil -} - -func (d *decodedMessage) MarshalJSON() ([]byte, error) { - jsonMsg := jsonMessage{ - Length: json.Number(strconv.Itoa(int(d.Length))), - HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), - PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), - Headers: d.Headers, - Payload: d.Payload, - CRC: json.Number(strconv.Itoa(int(d.CRC))), - } - - return json.Marshal(jsonMsg) -} - -func numAsUint32(n json.Number) (uint32, error) { - v, err := n.Int64() - if err != nil { - return 0, fmt.Errorf("failed to get int64 json number, %v", err) - } - - return uint32(v), nil -} - -func (d decodedMessage) Message() Message { - return Message{ - Headers: Headers(d.Headers), - Payload: d.Payload, - } -} - -type decodedHeaders Headers - -func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { - var jsonHeaders []struct { - Name string `json:"name"` - Type valueType `json:"type"` - Value interface{} `json:"value"` - } - - decoder := json.NewDecoder(bytes.NewReader(b)) - decoder.UseNumber() - if err := decoder.Decode(&jsonHeaders); err != nil { - return err - } - - var headers Headers - for _, h := range jsonHeaders { - value, err := valueFromType(h.Type, h.Value) - if err != nil { - return err - } - headers.Set(h.Name, value) - } - *hs = decodedHeaders(headers) - - return nil -} - -func valueFromType(typ valueType, val interface{}) (Value, error) { - switch typ { - case trueValueType: - return BoolValue(true), nil - case falseValueType: - return BoolValue(false), nil - case int8ValueType: - v, err := val.(json.Number).Int64() - return Int8Value(int8(v)), err - case int16ValueType: - v, err := val.(json.Number).Int64() - return Int16Value(int16(v)), err - case int32ValueType: - v, err := val.(json.Number).Int64() - return Int32Value(int32(v)), err - case int64ValueType: - v, err := val.(json.Number).Int64() - return Int64Value(v), err - case bytesValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return BytesValue(v), err - case stringValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return StringValue(string(v)), err - case timestampValueType: - v, err := val.(json.Number).Int64() - return TimestampValue(timeFromEpochMilli(v)), err - case uuidValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - var tv UUIDValue - copy(tv[:], v) - return tv, err - default: - panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go deleted file mode 100644 index d9ab7652f4a5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go +++ /dev/null @@ -1,218 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "github.com/aws/smithy-go/logging" - "hash" - "hash/crc32" - "io" -) - -// DecoderOptions is the Decoder configuration options. -type DecoderOptions struct { - Logger logging.Logger - LogMessages bool -} - -// Decoder provides decoding of an Event Stream messages. -type Decoder struct { - options DecoderOptions -} - -// NewDecoder initializes and returns a Decoder for decoding event -// stream messages from the reader provided. -func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder { - options := DecoderOptions{} - - for _, fn := range optFns { - fn(&options) - } - - return &Decoder{ - options: options, - } -} - -// Decode attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if decodeMessage fails to read -// the message from the stream. -// -// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers -// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying -// payloadBuf byte slice. -func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { - if d.options.Logger != nil && d.options.LogMessages { - debugMsgBuf := bytes.NewBuffer(nil) - reader = io.TeeReader(reader, debugMsgBuf) - defer func() { - logMessageDecode(d.options.Logger, debugMsgBuf, m, err) - }() - } - - m, err = decodeMessage(reader, payloadBuf) - - return m, err -} - -// decodeMessage attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if decodeMessage fails to read -// the message from the reader. -func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) { - crc := crc32.New(crc32IEEETable) - hashReader := io.TeeReader(reader, crc) - - prelude, err := decodePrelude(hashReader, crc) - if err != nil { - return Message{}, err - } - - if prelude.HeadersLen > 0 { - lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) - m.Headers, err = decodeHeaders(lr) - if err != nil { - return Message{}, err - } - } - - if payloadLen := prelude.PayloadLen(); payloadLen > 0 { - buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) - if err != nil { - return Message{}, err - } - m.Payload = buf - } - - msgCRC := crc.Sum32() - if err := validateCRC(reader, msgCRC); err != nil { - return Message{}, err - } - - return m, nil -} - -func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Logf(logging.Debug, w.String()) }() - - fmt.Fprintf(w, "Raw message:\n%s\n", - hex.Dump(msgBuf.Bytes())) - - if decodeErr != nil { - fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr) - return - } - - rawMsg, err := msg.rawMessage() - if err != nil { - fmt.Fprintf(w, "failed to create raw message, %v\n", err) - return - } - - decodedMsg := decodedMessage{ - rawMessage: rawMsg, - Headers: decodedHeaders(msg.Headers), - } - - fmt.Fprintf(w, "Decoded message:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(decodedMsg); err != nil { - fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) - } -} - -func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { - var p messagePrelude - - var err error - p.Length, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - p.HeadersLen, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - if err := p.ValidateLens(); err != nil { - return messagePrelude{}, err - } - - preludeCRC := crc.Sum32() - if err := validateCRC(r, preludeCRC); err != nil { - return messagePrelude{}, err - } - - p.PreludeCRC = preludeCRC - - return p, nil -} - -func decodePayload(buf []byte, r io.Reader) ([]byte, error) { - w := bytes.NewBuffer(buf[0:0]) - - _, err := io.Copy(w, r) - return w.Bytes(), err -} - -func decodeUint8(r io.Reader) (uint8, error) { - type byteReader interface { - ReadByte() (byte, error) - } - - if br, ok := r.(byteReader); ok { - v, err := br.ReadByte() - return v, err - } - - var b [1]byte - _, err := io.ReadFull(r, b[:]) - return b[0], err -} - -func decodeUint16(r io.Reader) (uint16, error) { - var b [2]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint16(bs), nil -} - -func decodeUint32(r io.Reader) (uint32, error) { - var b [4]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(bs), nil -} - -func decodeUint64(r io.Reader) (uint64, error) { - var b [8]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint64(bs), nil -} - -func validateCRC(r io.Reader, expect uint32) error { - msgCRC, err := decodeUint32(r) - if err != nil { - return err - } - - if msgCRC != expect { - return ChecksumError{} - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go deleted file mode 100644 index f03ee4b934b0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go +++ /dev/null @@ -1,167 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "github.com/aws/smithy-go/logging" - "hash" - "hash/crc32" - "io" -) - -// EncoderOptions is the configuration options for Encoder. -type EncoderOptions struct { - Logger logging.Logger - LogMessages bool -} - -// Encoder provides EventStream message encoding. -type Encoder struct { - options EncoderOptions - - headersBuf *bytes.Buffer - messageBuf *bytes.Buffer -} - -// NewEncoder initializes and returns an Encoder to encode Event Stream -// messages. -func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder { - o := EncoderOptions{} - - for _, fn := range optFns { - fn(&o) - } - - return &Encoder{ - options: o, - headersBuf: bytes.NewBuffer(nil), - messageBuf: bytes.NewBuffer(nil), - } -} - -// Encode encodes a single EventStream message to the io.Writer the Encoder -// was created with. An error is returned if writing the message fails. -func (e *Encoder) Encode(w io.Writer, msg Message) (err error) { - e.headersBuf.Reset() - e.messageBuf.Reset() - - var writer io.Writer = e.messageBuf - if e.options.Logger != nil && e.options.LogMessages { - encodeMsgBuf := bytes.NewBuffer(nil) - writer = io.MultiWriter(writer, encodeMsgBuf) - defer func() { - logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err) - }() - } - - if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { - return err - } - - crc := crc32.New(crc32IEEETable) - hashWriter := io.MultiWriter(writer, crc) - - headersLen := uint32(e.headersBuf.Len()) - payloadLen := uint32(len(msg.Payload)) - - if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { - return err - } - - if headersLen > 0 { - if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { - return err - } - } - - if payloadLen > 0 { - if _, err = hashWriter.Write(msg.Payload); err != nil { - return err - } - } - - msgCRC := crc.Sum32() - if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil { - return err - } - - _, err = io.Copy(w, e.messageBuf) - - return err -} - -func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Logf(logging.Debug, w.String()) }() - - fmt.Fprintf(w, "Message to encode:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(msg); err != nil { - fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) - } - - if encodeErr != nil { - fmt.Fprintf(w, "Encode error: %v\n", encodeErr) - return - } - - fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) -} - -func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { - p := messagePrelude{ - Length: minMsgLen + headersLen + payloadLen, - HeadersLen: headersLen, - } - if err := p.ValidateLens(); err != nil { - return err - } - - err := binaryWriteFields(w, binary.BigEndian, - p.Length, - p.HeadersLen, - ) - if err != nil { - return err - } - - p.PreludeCRC = crc.Sum32() - err = binary.Write(w, binary.BigEndian, p.PreludeCRC) - if err != nil { - return err - } - - return nil -} - -// EncodeHeaders writes the header values to the writer encoded in the event -// stream format. Returns an error if a header fails to encode. -func EncodeHeaders(w io.Writer, headers Headers) error { - for _, h := range headers { - hn := headerName{ - Len: uint8(len(h.Name)), - } - copy(hn.Name[:hn.Len], h.Name) - if err := hn.encode(w); err != nil { - return err - } - - if err := h.Value.encode(w); err != nil { - return err - } - } - - return nil -} - -func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { - for _, v := range vs { - if err := binary.Write(w, order, v); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go deleted file mode 100644 index 5481ef30796d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go +++ /dev/null @@ -1,23 +0,0 @@ -package eventstream - -import "fmt" - -// LengthError provides the error for items being larger than a maximum length. -type LengthError struct { - Part string - Want int - Have int - Value interface{} -} - -func (e LengthError) Error() string { - return fmt.Sprintf("%s length invalid, %d/%d, %v", - e.Part, e.Want, e.Have, e.Value) -} - -// ChecksumError provides the error for message checksum invalidation errors. -type ChecksumError struct{} - -func (e ChecksumError) Error() string { - return "message checksum mismatch" -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go deleted file mode 100644 index 93ea71ffdf8c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go +++ /dev/null @@ -1,24 +0,0 @@ -package eventstreamapi - -// EventStream headers with specific meaning to async API functionality. -const ( - ChunkSignatureHeader = `:chunk-signature` // chunk signature for message - DateHeader = `:date` // Date header for signature - ContentTypeHeader = ":content-type" // message payload content-type - - // Message header and values - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". - - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go deleted file mode 100644 index d07ff6b89e14..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go +++ /dev/null @@ -1,71 +0,0 @@ -package eventstreamapi - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -type eventStreamWriterKey struct{} - -// GetInputStreamWriter returns EventTypeHeader io.PipeWriter used for the operation's input event stream. -func GetInputStreamWriter(ctx context.Context) io.WriteCloser { - writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser) - return writeCloser -} - -func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context { - return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser) -} - -// InitializeStreamWriter is a Finalize middleware initializes an in-memory pipe for sending event stream messages -// via the HTTP request body. -type InitializeStreamWriter struct{} - -// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack. -func AddInitializeStreamWriter(stack *middleware.Stack) error { - return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After) -} - -// ID returns the identifier for the middleware. -func (i *InitializeStreamWriter) ID() string { - return "InitializeStreamWriter" -} - -// HandleFinalize is the middleware implementation. -func (i *InitializeStreamWriter) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) - } - - inputReader, inputWriter := io.Pipe() - defer func() { - if err == nil { - return - } - _ = inputReader.Close() - _ = inputWriter.Close() - }() - - request, err = request.SetStream(inputReader) - if err != nil { - return out, metadata, err - } - in.Request = request - - ctx = setInputStreamWriter(ctx, inputWriter) - - out, metadata, err = next.HandleFinalize(ctx, in) - if err != nil { - return out, metadata, err - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go deleted file mode 100644 index cbf5a28621b0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package eventstreamapi - -import smithyhttp "github.com/aws/smithy-go/transport/http" - -// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. -// -// This operation is a no-op for Go 1.18 and above. -func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go deleted file mode 100644 index 7d10ec2ebff5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !go1.18 -// +build !go1.18 - -package eventstreamapi - -import smithyhttp "github.com/aws/smithy-go/transport/http" - -// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. -func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { - r.Header.Set("Expect", "100-continue") - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go deleted file mode 100644 index 7b63c276311c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package eventstream - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go deleted file mode 100644 index f6f8c5674eda..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go +++ /dev/null @@ -1,175 +0,0 @@ -package eventstream - -import ( - "encoding/binary" - "fmt" - "io" -) - -// Headers are a collection of EventStream header values. -type Headers []Header - -// Header is a single EventStream Key Value header pair. -type Header struct { - Name string - Value Value -} - -// Set associates the name with a value. If the header name already exists in -// the Headers the value will be replaced with the new one. -func (hs *Headers) Set(name string, value Value) { - var i int - for ; i < len(*hs); i++ { - if (*hs)[i].Name == name { - (*hs)[i].Value = value - return - } - } - - *hs = append(*hs, Header{ - Name: name, Value: value, - }) -} - -// Get returns the Value associated with the header. Nil is returned if the -// value does not exist. -func (hs Headers) Get(name string) Value { - for i := 0; i < len(hs); i++ { - if h := hs[i]; h.Name == name { - return h.Value - } - } - return nil -} - -// Del deletes the value in the Headers if it exists. -func (hs *Headers) Del(name string) { - for i := 0; i < len(*hs); i++ { - if (*hs)[i].Name == name { - copy((*hs)[i:], (*hs)[i+1:]) - (*hs) = (*hs)[:len(*hs)-1] - } - } -} - -// Clone returns a deep copy of the headers -func (hs Headers) Clone() Headers { - o := make(Headers, 0, len(hs)) - for _, h := range hs { - o.Set(h.Name, h.Value) - } - return o -} - -func decodeHeaders(r io.Reader) (Headers, error) { - hs := Headers{} - - for { - name, err := decodeHeaderName(r) - if err != nil { - if err == io.EOF { - // EOF while getting header name means no more headers - break - } - return nil, err - } - - value, err := decodeHeaderValue(r) - if err != nil { - return nil, err - } - - hs.Set(name, value) - } - - return hs, nil -} - -func decodeHeaderName(r io.Reader) (string, error) { - var n headerName - - var err error - n.Len, err = decodeUint8(r) - if err != nil { - return "", err - } - - name := n.Name[:n.Len] - if _, err := io.ReadFull(r, name); err != nil { - return "", err - } - - return string(name), nil -} - -func decodeHeaderValue(r io.Reader) (Value, error) { - var raw rawValue - - typ, err := decodeUint8(r) - if err != nil { - return nil, err - } - raw.Type = valueType(typ) - - var v Value - - switch raw.Type { - case trueValueType: - v = BoolValue(true) - case falseValueType: - v = BoolValue(false) - case int8ValueType: - var tv Int8Value - err = tv.decode(r) - v = tv - case int16ValueType: - var tv Int16Value - err = tv.decode(r) - v = tv - case int32ValueType: - var tv Int32Value - err = tv.decode(r) - v = tv - case int64ValueType: - var tv Int64Value - err = tv.decode(r) - v = tv - case bytesValueType: - var tv BytesValue - err = tv.decode(r) - v = tv - case stringValueType: - var tv StringValue - err = tv.decode(r) - v = tv - case timestampValueType: - var tv TimestampValue - err = tv.decode(r) - v = tv - case uuidValueType: - var tv UUIDValue - err = tv.decode(r) - v = tv - default: - panic(fmt.Sprintf("unknown value type %d", raw.Type)) - } - - // Error could be EOF, let caller deal with it - return v, err -} - -const maxHeaderNameLen = 255 - -type headerName struct { - Len uint8 - Name [maxHeaderNameLen]byte -} - -func (v headerName) encode(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { - return err - } - - _, err := w.Write(v.Name[:v.Len]) - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go deleted file mode 100644 index 423b6bb26c1e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go +++ /dev/null @@ -1,521 +0,0 @@ -package eventstream - -import ( - "encoding/base64" - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "strconv" - "time" -) - -const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 - -// valueType is the EventStream header value type. -type valueType uint8 - -// Header value types -const ( - trueValueType valueType = iota - falseValueType - int8ValueType // Byte - int16ValueType // Short - int32ValueType // Integer - int64ValueType // Long - bytesValueType - stringValueType - timestampValueType - uuidValueType -) - -func (t valueType) String() string { - switch t { - case trueValueType: - return "bool" - case falseValueType: - return "bool" - case int8ValueType: - return "int8" - case int16ValueType: - return "int16" - case int32ValueType: - return "int32" - case int64ValueType: - return "int64" - case bytesValueType: - return "byte_array" - case stringValueType: - return "string" - case timestampValueType: - return "timestamp" - case uuidValueType: - return "uuid" - default: - return fmt.Sprintf("unknown value type %d", uint8(t)) - } -} - -type rawValue struct { - Type valueType - Len uint16 // Only set for variable length slices - Value []byte // byte representation of value, BigEndian encoding. -} - -func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { - return binaryWriteFields(w, binary.BigEndian, - r.Type, - v, - ) -} - -func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { - binary.Write(w, binary.BigEndian, r.Type) - - _, err := w.Write(v) - return err -} - -func (r rawValue) encodeBytes(w io.Writer, v []byte) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - _, err = w.Write(v) - return err -} - -func (r rawValue) encodeString(w io.Writer, v string) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - type stringWriter interface { - WriteString(string) (int, error) - } - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - if sw, ok := w.(stringWriter); ok { - _, err = sw.WriteString(v) - } else { - _, err = w.Write([]byte(v)) - } - - return err -} - -func decodeFixedBytesValue(r io.Reader, buf []byte) error { - _, err := io.ReadFull(r, buf) - return err -} - -func decodeBytesValue(r io.Reader) ([]byte, error) { - var raw rawValue - var err error - raw.Len, err = decodeUint16(r) - if err != nil { - return nil, err - } - - buf := make([]byte, raw.Len) - _, err = io.ReadFull(r, buf) - if err != nil { - return nil, err - } - - return buf, nil -} - -func decodeStringValue(r io.Reader) (string, error) { - v, err := decodeBytesValue(r) - return string(v), err -} - -// Value represents the abstract header value. -type Value interface { - Get() interface{} - String() string - valueType() valueType - encode(io.Writer) error -} - -// An BoolValue provides eventstream encoding, and representation -// of a Go bool value. -type BoolValue bool - -// Get returns the underlying type -func (v BoolValue) Get() interface{} { - return bool(v) -} - -// valueType returns the EventStream header value type value. -func (v BoolValue) valueType() valueType { - if v { - return trueValueType - } - return falseValueType -} - -func (v BoolValue) String() string { - return strconv.FormatBool(bool(v)) -} - -// encode encodes the BoolValue into an eventstream binary value -// representation. -func (v BoolValue) encode(w io.Writer) error { - return binary.Write(w, binary.BigEndian, v.valueType()) -} - -// An Int8Value provides eventstream encoding, and representation of a Go -// int8 value. -type Int8Value int8 - -// Get returns the underlying value. -func (v Int8Value) Get() interface{} { - return int8(v) -} - -// valueType returns the EventStream header value type value. -func (Int8Value) valueType() valueType { - return int8ValueType -} - -func (v Int8Value) String() string { - return fmt.Sprintf("0x%02x", int8(v)) -} - -// encode encodes the Int8Value into an eventstream binary value -// representation. -func (v Int8Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeScalar(w, v) -} - -func (v *Int8Value) decode(r io.Reader) error { - n, err := decodeUint8(r) - if err != nil { - return err - } - - *v = Int8Value(n) - return nil -} - -// An Int16Value provides eventstream encoding, and representation of a Go -// int16 value. -type Int16Value int16 - -// Get returns the underlying value. -func (v Int16Value) Get() interface{} { - return int16(v) -} - -// valueType returns the EventStream header value type value. -func (Int16Value) valueType() valueType { - return int16ValueType -} - -func (v Int16Value) String() string { - return fmt.Sprintf("0x%04x", int16(v)) -} - -// encode encodes the Int16Value into an eventstream binary value -// representation. -func (v Int16Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int16Value) decode(r io.Reader) error { - n, err := decodeUint16(r) - if err != nil { - return err - } - - *v = Int16Value(n) - return nil -} - -// An Int32Value provides eventstream encoding, and representation of a Go -// int32 value. -type Int32Value int32 - -// Get returns the underlying value. -func (v Int32Value) Get() interface{} { - return int32(v) -} - -// valueType returns the EventStream header value type value. -func (Int32Value) valueType() valueType { - return int32ValueType -} - -func (v Int32Value) String() string { - return fmt.Sprintf("0x%08x", int32(v)) -} - -// encode encodes the Int32Value into an eventstream binary value -// representation. -func (v Int32Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int32Value) decode(r io.Reader) error { - n, err := decodeUint32(r) - if err != nil { - return err - } - - *v = Int32Value(n) - return nil -} - -// An Int64Value provides eventstream encoding, and representation of a Go -// int64 value. -type Int64Value int64 - -// Get returns the underlying value. -func (v Int64Value) Get() interface{} { - return int64(v) -} - -// valueType returns the EventStream header value type value. -func (Int64Value) valueType() valueType { - return int64ValueType -} - -func (v Int64Value) String() string { - return fmt.Sprintf("0x%016x", int64(v)) -} - -// encode encodes the Int64Value into an eventstream binary value -// representation. -func (v Int64Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int64Value) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = Int64Value(n) - return nil -} - -// An BytesValue provides eventstream encoding, and representation of a Go -// byte slice. -type BytesValue []byte - -// Get returns the underlying value. -func (v BytesValue) Get() interface{} { - return []byte(v) -} - -// valueType returns the EventStream header value type value. -func (BytesValue) valueType() valueType { - return bytesValueType -} - -func (v BytesValue) String() string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -// encode encodes the BytesValue into an eventstream binary value -// representation. -func (v BytesValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeBytes(w, []byte(v)) -} - -func (v *BytesValue) decode(r io.Reader) error { - buf, err := decodeBytesValue(r) - if err != nil { - return err - } - - *v = BytesValue(buf) - return nil -} - -// An StringValue provides eventstream encoding, and representation of a Go -// string. -type StringValue string - -// Get returns the underlying value. -func (v StringValue) Get() interface{} { - return string(v) -} - -// valueType returns the EventStream header value type value. -func (StringValue) valueType() valueType { - return stringValueType -} - -func (v StringValue) String() string { - return string(v) -} - -// encode encodes the StringValue into an eventstream binary value -// representation. -func (v StringValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeString(w, string(v)) -} - -func (v *StringValue) decode(r io.Reader) error { - s, err := decodeStringValue(r) - if err != nil { - return err - } - - *v = StringValue(s) - return nil -} - -// An TimestampValue provides eventstream encoding, and representation of a Go -// timestamp. -type TimestampValue time.Time - -// Get returns the underlying value. -func (v TimestampValue) Get() interface{} { - return time.Time(v) -} - -// valueType returns the EventStream header value type value. -func (TimestampValue) valueType() valueType { - return timestampValueType -} - -func (v TimestampValue) epochMilli() int64 { - nano := time.Time(v).UnixNano() - msec := nano / int64(time.Millisecond) - return msec -} - -func (v TimestampValue) String() string { - msec := v.epochMilli() - return strconv.FormatInt(msec, 10) -} - -// encode encodes the TimestampValue into an eventstream binary value -// representation. -func (v TimestampValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - msec := v.epochMilli() - return raw.encodeScalar(w, msec) -} - -func (v *TimestampValue) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = TimestampValue(timeFromEpochMilli(int64(n))) - return nil -} - -// MarshalJSON implements the json.Marshaler interface -func (v TimestampValue) MarshalJSON() ([]byte, error) { - return []byte(v.String()), nil -} - -func timeFromEpochMilli(t int64) time.Time { - secs := t / 1e3 - msec := t % 1e3 - return time.Unix(secs, msec*int64(time.Millisecond)).UTC() -} - -// An UUIDValue provides eventstream encoding, and representation of a UUID -// value. -type UUIDValue [16]byte - -// Get returns the underlying value. -func (v UUIDValue) Get() interface{} { - return v[:] -} - -// valueType returns the EventStream header value type value. -func (UUIDValue) valueType() valueType { - return uuidValueType -} - -func (v UUIDValue) String() string { - var scratch [36]byte - - const dash = '-' - - hex.Encode(scratch[:8], v[0:4]) - scratch[8] = dash - hex.Encode(scratch[9:13], v[4:6]) - scratch[13] = dash - hex.Encode(scratch[14:18], v[6:8]) - scratch[18] = dash - hex.Encode(scratch[19:23], v[8:10]) - scratch[23] = dash - hex.Encode(scratch[24:], v[10:]) - - return string(scratch[:]) -} - -// encode encodes the UUIDValue into an eventstream binary value -// representation. -func (v UUIDValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeFixedSlice(w, v[:]) -} - -func (v *UUIDValue) decode(r io.Reader) error { - tv := (*v)[:] - return decodeFixedBytesValue(r, tv) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go deleted file mode 100644 index 1a77654f7e54..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go +++ /dev/null @@ -1,99 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "hash/crc32" -) - -const preludeLen = 8 -const preludeCRCLen = 4 -const msgCRCLen = 4 -const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen - -var crc32IEEETable = crc32.MakeTable(crc32.IEEE) - -// A Message provides the eventstream message representation. -type Message struct { - Headers Headers - Payload []byte -} - -func (m *Message) rawMessage() (rawMessage, error) { - var raw rawMessage - - if len(m.Headers) > 0 { - var headers bytes.Buffer - if err := EncodeHeaders(&headers, m.Headers); err != nil { - return rawMessage{}, err - } - raw.Headers = headers.Bytes() - raw.HeadersLen = uint32(len(raw.Headers)) - } - - raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen - - hash := crc32.New(crc32IEEETable) - binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) - raw.PreludeCRC = hash.Sum32() - - binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) - - if raw.HeadersLen > 0 { - hash.Write(raw.Headers) - } - - // Read payload bytes and update hash for it as well. - if len(m.Payload) > 0 { - raw.Payload = m.Payload - hash.Write(raw.Payload) - } - - raw.CRC = hash.Sum32() - - return raw, nil -} - -// Clone returns a deep copy of the message. -func (m Message) Clone() Message { - var payload []byte - if m.Payload != nil { - payload = make([]byte, len(m.Payload)) - copy(payload, m.Payload) - } - - return Message{ - Headers: m.Headers.Clone(), - Payload: payload, - } -} - -type messagePrelude struct { - Length uint32 - HeadersLen uint32 - PreludeCRC uint32 -} - -func (p messagePrelude) PayloadLen() uint32 { - return p.Length - p.HeadersLen - minMsgLen -} - -func (p messagePrelude) ValidateLens() error { - if p.Length == 0 { - return LengthError{ - Part: "message prelude", - Want: minMsgLen, - Have: int(p.Length), - } - } - return nil -} - -type rawMessage struct { - messagePrelude - - Headers []byte - Payload []byte - - CRC uint32 -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go deleted file mode 100644 index 6669a3ddfd04..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go +++ /dev/null @@ -1,61 +0,0 @@ -package query - -import ( - "net/url" - "strconv" -) - -// Array represents the encoding of Query lists and sets. A Query array is a -// representation of a list of values of a fixed type. A serialized array might -// look like the following: -// -// ListName.member.1=foo -// &ListName.member.2=bar -// &Listname.member.3=baz -type Array struct { - // The query values to add the array to. - values url.Values - // The array's prefix, which includes the names of all parent structures - // and ends with the name of the list. For example, the prefix might be - // "ParentStructure.ListName". This prefix will be used to form the full - // keys for each element in the list. For example, an entry might have the - // key "ParentStructure.ListName.member.MemberName.1". - // - // When the array is not flat the prefix will contain the memberName otherwise the memberName is ignored - prefix string - // Elements are stored in values, so we keep track of the list size here. - size int32 - // Empty lists are encoded as "=", if we add a value later we will - // remove this encoding - emptyValue Value -} - -func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { - emptyValue := newValue(values, prefix, flat) - emptyValue.String("") - - if !flat { - // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead - prefix = prefix + keySeparator + memberName - } - - return &Array{ - values: values, - prefix: prefix, - emptyValue: emptyValue, - } -} - -// Value adds a new element to the Query Array. Returns a Value type used to -// encode the array element. -func (a *Array) Value() Value { - if a.size == 0 { - delete(a.values, a.emptyValue.key) - } - - // Query lists start a 1, so adjust the size first - a.size++ - // Lists can't have flat members - // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead - return newValue(a.values, a.prefix+keySeparator+strconv.FormatInt(int64(a.size), 10), false) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go deleted file mode 100644 index 2ecf9241cdd7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go +++ /dev/null @@ -1,80 +0,0 @@ -package query - -import ( - "io" - "net/url" - "sort" -) - -// Encoder is a Query encoder that supports construction of Query body -// values using methods. -type Encoder struct { - // The query values that will be built up to manage encoding. - values url.Values - // The writer that the encoded body will be written to. - writer io.Writer - Value -} - -// NewEncoder returns a new Query body encoder -func NewEncoder(writer io.Writer) *Encoder { - values := url.Values{} - return &Encoder{ - values: values, - writer: writer, - Value: newBaseValue(values), - } -} - -// Encode returns the []byte slice representing the current -// state of the Query encoder. -func (e Encoder) Encode() error { - ws, ok := e.writer.(interface{ WriteString(string) (int, error) }) - if !ok { - // Fall back to less optimal byte slice casting if WriteString isn't available. - ws = &wrapWriteString{writer: e.writer} - } - - // Get the keys and sort them to have a stable output - keys := make([]string, 0, len(e.values)) - for k := range e.values { - keys = append(keys, k) - } - sort.Strings(keys) - isFirstEntry := true - for _, key := range keys { - queryValues := e.values[key] - escapedKey := url.QueryEscape(key) - for _, value := range queryValues { - if !isFirstEntry { - if _, err := ws.WriteString(`&`); err != nil { - return err - } - } else { - isFirstEntry = false - } - if _, err := ws.WriteString(escapedKey); err != nil { - return err - } - if _, err := ws.WriteString(`=`); err != nil { - return err - } - if _, err := ws.WriteString(url.QueryEscape(value)); err != nil { - return err - } - } - } - return nil -} - -// wrapWriteString wraps an io.Writer to provide a WriteString method -// where one is not available. -type wrapWriteString struct { - writer io.Writer -} - -// WriteString writes a string to the wrapped writer by casting it to -// a byte array first. -func (w wrapWriteString) WriteString(v string) (int, error) { - return w.writer.Write([]byte(v)) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go deleted file mode 100644 index dea242b8b6d7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go +++ /dev/null @@ -1,78 +0,0 @@ -package query - -import ( - "fmt" - "net/url" -) - -// Map represents the encoding of Query maps. A Query map is a representation -// of a mapping of arbitrary string keys to arbitrary values of a fixed type. -// A Map differs from an Object in that the set of keys is not fixed, in that -// the values must all be of the same type, and that map entries are ordered. -// A serialized map might look like the following: -// -// MapName.entry.1.key=Foo -// &MapName.entry.1.value=spam -// &MapName.entry.2.key=Bar -// &MapName.entry.2.value=eggs -type Map struct { - // The query values to add the map to. - values url.Values - // The map's prefix, which includes the names of all parent structures - // and ends with the name of the object. For example, the prefix might be - // "ParentStructure.MapName". This prefix will be used to form the full - // keys for each key-value pair of the map. For example, a value might have - // the key "ParentStructure.MapName.1.value". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string - // Whether the map is flat or not. A map that is not flat will produce the - // following entries to the url.Values for a given key-value pair: - // MapName.entry.1.KeyLocationName=mykey - // MapName.entry.1.ValueLocationName=myvalue - // A map that is flat will produce the following: - // MapName.1.KeyLocationName=mykey - // MapName.1.ValueLocationName=myvalue - flat bool - // The location name of the key. In most cases this should be "key". - keyLocationName string - // The location name of the value. In most cases this should be "value". - valueLocationName string - // Elements are stored in values, so we keep track of the list size here. - size int32 -} - -func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map { - return &Map{ - values: values, - prefix: prefix, - flat: flat, - keyLocationName: keyLocationName, - valueLocationName: valueLocationName, - } -} - -// Key adds the given named key to the Query map. -// Returns a Value encoder that should be used to encode a Query value type. -func (m *Map) Key(name string) Value { - // Query lists start a 1, so adjust the size first - m.size++ - var key string - var value string - if m.flat { - key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName) - value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName) - } else { - key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName) - value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName) - } - - // The key can only be a string, so we just go ahead and set it here - newValue(m.values, key, false).String(name) - - // Maps can't have flat members - return newValue(m.values, value, false) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go deleted file mode 100644 index 36034479113b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go +++ /dev/null @@ -1,62 +0,0 @@ -package query - -import ( - "context" - "fmt" - "io/ioutil" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the -// operation serializer that will convert the query request body to a GET -// operation with the query message in the HTTP request querystring. -func AddAsGetRequestMiddleware(stack *middleware.Stack) error { - return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After) -} - -type asGetRequest struct{} - -func (*asGetRequest) ID() string { return "Query:AsGetRequest" } - -func (m *asGetRequest) HandleSerialize( - ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request) - } - - req.Method = "GET" - - // If the stream is not set, nothing else to do. - stream := req.GetStream() - if stream == nil { - return next.HandleSerialize(ctx, input) - } - - // Clear the stream since there will not be any body. - req.Header.Del("Content-Type") - req, err = req.SetStream(nil) - if err != nil { - return out, metadata, fmt.Errorf("unable update request body %w", err) - } - input.Request = req - - // Update request query with the body's query string value. - delim := "" - if len(req.URL.RawQuery) != 0 { - delim = "&" - } - - b, err := ioutil.ReadAll(stream) - if err != nil { - return out, metadata, fmt.Errorf("unable to get request body %w", err) - } - req.URL.RawQuery += delim + string(b) - - return next.HandleSerialize(ctx, input) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go deleted file mode 100644 index 305a8ace3022..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go +++ /dev/null @@ -1,68 +0,0 @@ -package query - -import "net/url" - -// Object represents the encoding of Query structures and unions. A Query -// object is a representation of a mapping of string keys to arbitrary -// values where there is a fixed set of keys whose values each have their -// own known type. A serialized object might look like the following: -// -// ObjectName.Foo=value -// &ObjectName.Bar=5 -type Object struct { - // The query values to add the object to. - values url.Values - // The object's prefix, which includes the names of all parent structures - // and ends with the name of the object. For example, the prefix might be - // "ParentStructure.ObjectName". This prefix will be used to form the full - // keys for each member of the object. For example, a member might have the - // key "ParentStructure.ObjectName.MemberName". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string -} - -func newObject(values url.Values, prefix string) *Object { - return &Object{ - values: values, - prefix: prefix, - } -} - -// Key adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query value type. -func (o *Object) Key(name string) Value { - return o.key(name, false) -} - -// KeyWithValues adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query list of values. -func (o *Object) KeyWithValues(name string) Value { - return o.keyWithValues(name, false) -} - -// FlatKey adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query value type. The -// value will be flattened if it is a map or array. -func (o *Object) FlatKey(name string) Value { - return o.key(name, true) -} - -func (o *Object) key(name string, flatValue bool) Value { - if o.prefix != "" { - // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead - return newValue(o.values, o.prefix+keySeparator+name, flatValue) - } - return newValue(o.values, name, flatValue) -} - -func (o *Object) keyWithValues(name string, flatValue bool) Value { - if o.prefix != "" { - // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead - return newAppendValue(o.values, o.prefix+keySeparator+name, flatValue) - } - return newAppendValue(o.values, name, flatValue) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go deleted file mode 100644 index 8063c592dddd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go +++ /dev/null @@ -1,117 +0,0 @@ -package query - -import ( - "math/big" - "net/url" - - "github.com/aws/smithy-go/encoding/httpbinding" -) - -const keySeparator = "." - -// Value represents a Query Value type. -type Value struct { - // The query values to add the value to. - values url.Values - // The value's key, which will form the prefix for complex types. - key string - // Whether the value should be flattened or not if it's a flattenable type. - flat bool - queryValue httpbinding.QueryValue -} - -func newValue(values url.Values, key string, flat bool) Value { - return Value{ - values: values, - key: key, - flat: flat, - queryValue: httpbinding.NewQueryValue(values, key, false), - } -} - -func newAppendValue(values url.Values, key string, flat bool) Value { - return Value{ - values: values, - key: key, - flat: flat, - queryValue: httpbinding.NewQueryValue(values, key, true), - } -} - -func newBaseValue(values url.Values) Value { - return Value{ - values: values, - queryValue: httpbinding.NewQueryValue(nil, "", false), - } -} - -// Array returns a new Array encoder. -func (qv Value) Array(locationName string) *Array { - return newArray(qv.values, qv.key, qv.flat, locationName) -} - -// Object returns a new Object encoder. -func (qv Value) Object() *Object { - return newObject(qv.values, qv.key) -} - -// Map returns a new Map encoder. -func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { - return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) -} - -// Base64EncodeBytes encodes v as a base64 query string value. -// This is intended to enable compatibility with the JSON encoder. -func (qv Value) Base64EncodeBytes(v []byte) { - qv.queryValue.Blob(v) -} - -// Boolean encodes v as a query string value -func (qv Value) Boolean(v bool) { - qv.queryValue.Boolean(v) -} - -// String encodes v as a query string value -func (qv Value) String(v string) { - qv.queryValue.String(v) -} - -// Byte encodes v as a query string value -func (qv Value) Byte(v int8) { - qv.queryValue.Byte(v) -} - -// Short encodes v as a query string value -func (qv Value) Short(v int16) { - qv.queryValue.Short(v) -} - -// Integer encodes v as a query string value -func (qv Value) Integer(v int32) { - qv.queryValue.Integer(v) -} - -// Long encodes v as a query string value -func (qv Value) Long(v int64) { - qv.queryValue.Long(v) -} - -// Float encodes v as a query string value -func (qv Value) Float(v float32) { - qv.queryValue.Float(v) -} - -// Double encodes v as a query string value -func (qv Value) Double(v float64) { - qv.queryValue.Double(v) -} - -// BigInteger encodes v as a query string value -func (qv Value) BigInteger(v *big.Int) { - qv.queryValue.BigInteger(v) -} - -// BigDecimal encodes v as a query string value -func (qv Value) BigDecimal(v *big.Float) { - qv.queryValue.BigDecimal(v) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go deleted file mode 100644 index 1bce78a4d45b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go +++ /dev/null @@ -1,85 +0,0 @@ -package restjson - -import ( - "encoding/json" - "io" - "strings" - - "github.com/aws/smithy-go" -) - -// GetErrorInfo util looks for code, __type, and message members in the -// json body. These members are optionally available, and the function -// returns the value of member if it is available. This function is useful to -// identify the error code, msg in a REST JSON error response. -func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) { - var errInfo struct { - Code string - Type string `json:"__type"` - Message string - } - - err = decoder.Decode(&errInfo) - if err != nil { - if err == io.EOF { - return errorType, message, nil - } - return errorType, message, err - } - - // assign error type - if len(errInfo.Code) != 0 { - errorType = errInfo.Code - } else if len(errInfo.Type) != 0 { - errorType = errInfo.Type - } - - // assign error message - if len(errInfo.Message) != 0 { - message = errInfo.Message - } - - // sanitize error - if len(errorType) != 0 { - errorType = SanitizeErrorCode(errorType) - } - - return errorType, message, nil -} - -// SanitizeErrorCode sanitizes the errorCode string . -// The rule for sanitizing is if a `:` character is present, then take only the -// contents before the first : character in the value. -// If a # character is present, then take only the contents after the -// first # character in the value. -func SanitizeErrorCode(errorCode string) string { - if strings.ContainsAny(errorCode, ":") { - errorCode = strings.SplitN(errorCode, ":", 2)[0] - } - - if strings.ContainsAny(errorCode, "#") { - errorCode = strings.SplitN(errorCode, "#", 2)[1] - } - - return errorCode -} - -// GetSmithyGenericAPIError returns smithy generic api error and an error interface. -// Takes in json decoder, and error Code string as args. The function retrieves error message -// and error code from the decoder body. If errorCode of length greater than 0 is passed in as -// an argument, it is used instead. -func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) { - errorType, message, err := GetErrorInfo(decoder) - if err != nil { - return nil, err - } - - if len(errorCode) == 0 { - errorCode = errorType - } - - return &smithy.GenericAPIError{ - Code: errorCode, - Message: message, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go deleted file mode 100644 index 6975ce6524d9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go +++ /dev/null @@ -1,48 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "io" -) - -// ErrorComponents represents the error response fields -// that will be deserialized from an xml error response body -type ErrorComponents struct { - Code string - Message string - RequestID string -} - -// GetErrorResponseComponents returns the error fields from an xml error response body -func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { - if noErrorWrapping { - var errResponse noWrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents(errResponse), nil - } - - var errResponse wrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents(errResponse), nil -} - -// noWrappedErrorResponse represents the error response body with -// no internal Error wrapping -type noWrappedErrorResponse struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - RequestID string `xml:"RequestId"` -} - -// wrappedErrorResponse represents the error response body -// wrapped within Error -type wrappedErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go deleted file mode 100644 index 8c78364105bf..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go +++ /dev/null @@ -1,20 +0,0 @@ -package ratelimit - -import "context" - -// None implements a no-op rate limiter which effectively disables client-side -// rate limiting (also known as "retry quotas"). -// -// GetToken does nothing and always returns a nil error. The returned -// token-release function does nothing, and always returns a nil error. -// -// AddTokens does nothing and always returns a nil error. -var None = &none{} - -type none struct{} - -func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) { - return func() error { return nil }, nil -} - -func (*none) AddTokens(v uint) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go deleted file mode 100644 index 974ef594f071..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go +++ /dev/null @@ -1,96 +0,0 @@ -package ratelimit - -import ( - "sync" -) - -// TokenBucket provides a concurrency safe utility for adding and removing -// tokens from the available token bucket. -type TokenBucket struct { - remainingTokens uint - maxCapacity uint - minCapacity uint - mu sync.Mutex -} - -// NewTokenBucket returns an initialized TokenBucket with the capacity -// specified. -func NewTokenBucket(i uint) *TokenBucket { - return &TokenBucket{ - remainingTokens: i, - maxCapacity: i, - minCapacity: 1, - } -} - -// Retrieve attempts to reduce the available tokens by the amount requested. If -// there are tokens available true will be returned along with the number of -// available tokens remaining. If amount requested is larger than the available -// capacity, false will be returned along with the available capacity. If the -// amount is less than the available capacity, the capacity will be reduced by -// that amount, and the remaining capacity and true will be returned. -func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) { - t.mu.Lock() - defer t.mu.Unlock() - - if amount > t.remainingTokens { - return t.remainingTokens, false - } - - t.remainingTokens -= amount - return t.remainingTokens, true -} - -// Refund returns the amount of tokens back to the available token bucket, up -// to the initial capacity. -func (t *TokenBucket) Refund(amount uint) { - t.mu.Lock() - defer t.mu.Unlock() - - // Capacity cannot exceed max capacity. - t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity) -} - -// Capacity returns the maximum capacity of tokens that the bucket could -// contain. -func (t *TokenBucket) Capacity() uint { - t.mu.Lock() - defer t.mu.Unlock() - - return t.maxCapacity -} - -// Remaining returns the number of tokens that remaining in the bucket. -func (t *TokenBucket) Remaining() uint { - t.mu.Lock() - defer t.mu.Unlock() - - return t.remainingTokens -} - -// Resize adjusts the size of the token bucket. Returns the capacity remaining. -func (t *TokenBucket) Resize(size uint) uint { - t.mu.Lock() - defer t.mu.Unlock() - - t.maxCapacity = uintMax(size, t.minCapacity) - - // Capacity needs to be capped at max capacity, if max size reduced. - t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity) - - return t.remainingTokens -} - -func uintMin(a, b uint) uint { - if a < b { - return a - } - return b -} - -func uintMax(a, b uint) uint { - if a > b { - return a - } - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go deleted file mode 100644 index d89090ad38e6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go +++ /dev/null @@ -1,83 +0,0 @@ -package ratelimit - -import ( - "context" - "fmt" -) - -type rateToken struct { - tokenCost uint - bucket *TokenBucket -} - -func (t rateToken) release() error { - t.bucket.Refund(t.tokenCost) - return nil -} - -// TokenRateLimit provides a Token Bucket RateLimiter implementation -// that limits the overall number of retry attempts that can be made across -// operation invocations. -type TokenRateLimit struct { - bucket *TokenBucket -} - -// NewTokenRateLimit returns an TokenRateLimit with default values. -// Functional options can configure the retry rate limiter. -func NewTokenRateLimit(tokens uint) *TokenRateLimit { - return &TokenRateLimit{ - bucket: NewTokenBucket(tokens), - } -} - -type canceledError struct { - Err error -} - -func (c canceledError) CanceledError() bool { return true } -func (c canceledError) Unwrap() error { return c.Err } -func (c canceledError) Error() string { - return fmt.Sprintf("canceled, %v", c.Err) -} - -// GetToken may cause a available pool of retry quota to be -// decremented. Will return an error if the decremented value can not be -// reduced from the retry quota. -func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) { - select { - case <-ctx.Done(): - return nil, canceledError{Err: ctx.Err()} - default: - } - if avail, ok := l.bucket.Retrieve(cost); !ok { - return nil, QuotaExceededError{Available: avail, Requested: cost} - } - - return rateToken{ - tokenCost: cost, - bucket: l.bucket, - }.release, nil -} - -// AddTokens increments the token bucket by a fixed amount. -func (l *TokenRateLimit) AddTokens(v uint) error { - l.bucket.Refund(v) - return nil -} - -// Remaining returns the number of remaining tokens in the bucket. -func (l *TokenRateLimit) Remaining() uint { - return l.bucket.Remaining() -} - -// QuotaExceededError provides the SDK error when the retries for a given -// token bucket have been exhausted. -type QuotaExceededError struct { - Available uint - Requested uint -} - -func (e QuotaExceededError) Error() string { - return fmt.Sprintf("retry quota exceeded, %d available, %d requested", - e.Available, e.Requested) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go deleted file mode 100644 index d8d00e615823..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go +++ /dev/null @@ -1,25 +0,0 @@ -package aws - -import ( - "fmt" -) - -// TODO remove replace with smithy.CanceledError - -// RequestCanceledError is the error that will be returned by an API request -// that was canceled. Requests given a Context may return this error when -// canceled. -type RequestCanceledError struct { - Err error -} - -// CanceledError returns true to satisfy interfaces checking for canceled errors. -func (*RequestCanceledError) CanceledError() bool { return true } - -// Unwrap returns the underlying error, if there was one. -func (e *RequestCanceledError) Unwrap() error { - return e.Err -} -func (e *RequestCanceledError) Error() string { - return fmt.Sprintf("request canceled, %v", e.Err) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go deleted file mode 100644 index 4dfde8573739..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go +++ /dev/null @@ -1,156 +0,0 @@ -package retry - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -const ( - // DefaultRequestCost is the cost of a single request from the adaptive - // rate limited token bucket. - DefaultRequestCost uint = 1 -) - -// DefaultThrottles provides the set of errors considered throttle errors that -// are checked by default. -var DefaultThrottles = []IsErrorThrottle{ - ThrottleErrorCode{ - Codes: DefaultThrottleErrorCodes, - }, -} - -// AdaptiveModeOptions provides the functional options for configuring the -// adaptive retry mode, and delay behavior. -type AdaptiveModeOptions struct { - // If the adaptive token bucket is empty, when an attempt will be made - // AdaptiveMode will sleep until a token is available. This can occur when - // attempts fail with throttle errors. Use this option to disable the sleep - // until token is available, and return error immediately. - FailOnNoAttemptTokens bool - - // The cost of an attempt from the AdaptiveMode's adaptive token bucket. - RequestCost uint - - // Set of strategies to determine if the attempt failed due to a throttle - // error. - // - // It is safe to append to this list in NewAdaptiveMode's functional options. - Throttles []IsErrorThrottle - - // Set of options for standard retry mode that AdaptiveMode is built on top - // of. AdaptiveMode may apply its own defaults to Standard retry mode that - // are different than the defaults of NewStandard. Use these options to - // override the default options. - StandardOptions []func(*StandardOptions) -} - -// AdaptiveMode provides an experimental retry strategy that expands on the -// Standard retry strategy, adding client attempt rate limits. The attempt rate -// limit is initially unrestricted, but becomes restricted when the attempt -// fails with for a throttle error. When restricted AdaptiveMode may need to -// sleep before an attempt is made, if too many throttles have been received. -// AdaptiveMode's sleep can be canceled with context cancel. Set -// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep, -// to fail fast. -// -// Eventually unrestricted attempt rate limit will be restored once attempts no -// longer are failing due to throttle errors. -type AdaptiveMode struct { - options AdaptiveModeOptions - throttles IsErrorThrottles - - retryer aws.RetryerV2 - rateLimit *adaptiveRateLimit -} - -// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy. -func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode { - o := AdaptiveModeOptions{ - RequestCost: DefaultRequestCost, - Throttles: append([]IsErrorThrottle{}, DefaultThrottles...), - } - for _, fn := range optFns { - fn(&o) - } - - return &AdaptiveMode{ - options: o, - throttles: IsErrorThrottles(o.Throttles), - retryer: NewStandard(o.StandardOptions...), - rateLimit: newAdaptiveRateLimit(), - } -} - -// IsErrorRetryable returns if the failed attempt is retryable. This check -// should determine if the error can be retried, or if the error is -// terminal. -func (a *AdaptiveMode) IsErrorRetryable(err error) bool { - return a.retryer.IsErrorRetryable(err) -} - -// MaxAttempts returns the maximum number of attempts that can be made for -// an attempt before failing. A value of 0 implies that the attempt should -// be retried until it succeeds if the errors are retryable. -func (a *AdaptiveMode) MaxAttempts() int { - return a.retryer.MaxAttempts() -} - -// RetryDelay returns the delay that should be used before retrying the -// attempt. Will return error if the if the delay could not be determined. -func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) ( - time.Duration, error, -) { - return a.retryer.RetryDelay(attempt, opErr) -} - -// GetRetryToken attempts to deduct the retry cost from the retry token pool. -// Returning the token release function, or error. -func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) ( - releaseToken func(error) error, err error, -) { - return a.retryer.GetRetryToken(ctx, opErr) -} - -// GetInitialToken returns the initial attempt token that can increment the -// retry token pool if the attempt is successful. -// -// Deprecated: This method does not provide a way to block using Context, -// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only -// present to implement Retryer interface. -func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) { - return nopRelease -} - -// GetAttemptToken returns the attempt token that can be used to rate limit -// attempt calls. Will be used by the SDK's retry package's Attempt -// middleware to get an attempt token prior to calling the temp and releasing -// the attempt token after the attempt has been made. -func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) { - for { - acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost) - if acquiredToken { - break - } - if a.options.FailOnNoAttemptTokens { - return nil, fmt.Errorf( - "unable to get attempt token, and FailOnNoAttemptTokens enables") - } - - if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil { - return nil, fmt.Errorf("failed to wait for token to be available, %w", err) - } - } - - return a.handleResponse, nil -} - -func (a *AdaptiveMode) handleResponse(opErr error) error { - throttled := a.throttles.IsErrorThrottle(opErr).Bool() - - a.rateLimit.Update(throttled) - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go deleted file mode 100644 index ad96d9b8c5d6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go +++ /dev/null @@ -1,158 +0,0 @@ -package retry - -import ( - "math" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -type adaptiveRateLimit struct { - tokenBucketEnabled bool - - smooth float64 - beta float64 - scaleConstant float64 - minFillRate float64 - - fillRate float64 - calculatedRate float64 - lastRefilled time.Time - measuredTxRate float64 - lastTxRateBucket float64 - requestCount int64 - lastMaxRate float64 - lastThrottleTime time.Time - timeWindow float64 - - tokenBucket *adaptiveTokenBucket - - mu sync.Mutex -} - -func newAdaptiveRateLimit() *adaptiveRateLimit { - now := sdk.NowTime() - return &adaptiveRateLimit{ - smooth: 0.8, - beta: 0.7, - scaleConstant: 0.4, - - minFillRate: 0.5, - - lastTxRateBucket: math.Floor(timeFloat64Seconds(now)), - lastThrottleTime: now, - - tokenBucket: newAdaptiveTokenBucket(0), - } -} - -func (a *adaptiveRateLimit) Enable(v bool) { - a.mu.Lock() - defer a.mu.Unlock() - - a.tokenBucketEnabled = v -} - -func (a *adaptiveRateLimit) AcquireToken(amount uint) ( - tokenAcquired bool, waitTryAgain time.Duration, -) { - a.mu.Lock() - defer a.mu.Unlock() - - if !a.tokenBucketEnabled { - return true, 0 - } - - a.tokenBucketRefill() - - available, ok := a.tokenBucket.Retrieve(float64(amount)) - if !ok { - waitDur := float64Seconds((float64(amount) - available) / a.fillRate) - return false, waitDur - } - - return true, 0 -} - -func (a *adaptiveRateLimit) Update(throttled bool) { - a.mu.Lock() - defer a.mu.Unlock() - - a.updateMeasuredRate() - - if throttled { - rateToUse := a.measuredTxRate - if a.tokenBucketEnabled { - rateToUse = math.Min(a.measuredTxRate, a.fillRate) - } - - a.lastMaxRate = rateToUse - a.calculateTimeWindow() - a.lastThrottleTime = sdk.NowTime() - a.calculatedRate = a.cubicThrottle(rateToUse) - a.tokenBucketEnabled = true - } else { - a.calculateTimeWindow() - a.calculatedRate = a.cubicSuccess(sdk.NowTime()) - } - - newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate) - a.tokenBucketUpdateRate(newRate) -} - -func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 { - dt := secondsFloat64(t.Sub(a.lastThrottleTime)) - return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate -} - -func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 { - return rateToUse * a.beta -} - -func (a *adaptiveRateLimit) calculateTimeWindow() { - a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.) -} - -func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) { - a.tokenBucketRefill() - a.fillRate = math.Max(newRPS, a.minFillRate) - a.tokenBucket.Resize(newRPS) -} - -func (a *adaptiveRateLimit) updateMeasuredRate() { - now := sdk.NowTime() - timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2. - a.requestCount++ - - if timeBucket > a.lastTxRateBucket { - currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket) - a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth)) - a.requestCount = 0 - a.lastTxRateBucket = timeBucket - } -} - -func (a *adaptiveRateLimit) tokenBucketRefill() { - now := sdk.NowTime() - if a.lastRefilled.IsZero() { - a.lastRefilled = now - return - } - - fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate - a.tokenBucket.Refund(fillAmount) - a.lastRefilled = now -} - -func float64Seconds(v float64) time.Duration { - return time.Duration(v * float64(time.Second)) -} - -func secondsFloat64(v time.Duration) float64 { - return float64(v) / float64(time.Second) -} - -func timeFloat64Seconds(v time.Time) float64 { - return float64(v.UnixNano()) / float64(time.Second) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go deleted file mode 100644 index 052723e8ed1e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go +++ /dev/null @@ -1,83 +0,0 @@ -package retry - -import ( - "math" - "sync" -) - -// adaptiveTokenBucket provides a concurrency safe utility for adding and -// removing tokens from the available token bucket. -type adaptiveTokenBucket struct { - remainingTokens float64 - maxCapacity float64 - minCapacity float64 - mu sync.Mutex -} - -// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the -// capacity specified. -func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket { - return &adaptiveTokenBucket{ - remainingTokens: i, - maxCapacity: i, - minCapacity: 1, - } -} - -// Retrieve attempts to reduce the available tokens by the amount requested. If -// there are tokens available true will be returned along with the number of -// available tokens remaining. If amount requested is larger than the available -// capacity, false will be returned along with the available capacity. If the -// amount is less than the available capacity, the capacity will be reduced by -// that amount, and the remaining capacity and true will be returned. -func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) { - t.mu.Lock() - defer t.mu.Unlock() - - if amount > t.remainingTokens { - return t.remainingTokens, false - } - - t.remainingTokens -= amount - return t.remainingTokens, true -} - -// Refund returns the amount of tokens back to the available token bucket, up -// to the initial capacity. -func (t *adaptiveTokenBucket) Refund(amount float64) { - t.mu.Lock() - defer t.mu.Unlock() - - // Capacity cannot exceed max capacity. - t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity) -} - -// Capacity returns the maximum capacity of tokens that the bucket could -// contain. -func (t *adaptiveTokenBucket) Capacity() float64 { - t.mu.Lock() - defer t.mu.Unlock() - - return t.maxCapacity -} - -// Remaining returns the number of tokens that remaining in the bucket. -func (t *adaptiveTokenBucket) Remaining() float64 { - t.mu.Lock() - defer t.mu.Unlock() - - return t.remainingTokens -} - -// Resize adjusts the size of the token bucket. Returns the capacity remaining. -func (t *adaptiveTokenBucket) Resize(size float64) float64 { - t.mu.Lock() - defer t.mu.Unlock() - - t.maxCapacity = math.Max(size, t.minCapacity) - - // Capacity needs to be capped at max capacity, if max size reduced. - t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity) - - return t.remainingTokens -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go deleted file mode 100644 index bfa5bf7d130e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go +++ /dev/null @@ -1,51 +0,0 @@ -package retry - -import ( - "context" - - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" -) - -type attemptMetrics struct { - Attempts metrics.Int64Counter - Errors metrics.Int64Counter - - AttemptDuration metrics.Float64Histogram -} - -func newAttemptMetrics(meter metrics.Meter) (*attemptMetrics, error) { - m := &attemptMetrics{} - var err error - - m.Attempts, err = meter.Int64Counter("client.call.attempts", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "{attempt}" - o.Description = "The number of attempts for an individual operation" - }) - if err != nil { - return nil, err - } - m.Errors, err = meter.Int64Counter("client.call.errors", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "{error}" - o.Description = "The number of errors for an operation" - }) - if err != nil { - return nil, err - } - m.AttemptDuration, err = meter.Float64Histogram("client.call.attempt_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes to connect to the service, send the request, and get back HTTP status code and headers (including time queued waiting to be sent)" - }) - if err != nil { - return nil, err - } - - return m, nil -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go deleted file mode 100644 index 3a08ebe0a727..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go +++ /dev/null @@ -1,80 +0,0 @@ -// Package retry provides interfaces and implementations for SDK request retry behavior. -// -// # Retryer Interface and Implementations -// -// This package defines Retryer interface that is used to either implement custom retry behavior -// or to extend the existing retry implementations provided by the SDK. This package provides a single -// retry implementation: Standard. -// -// # Standard -// -// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited -// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs. -// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client, -// and uses an additional delay policy to limit the time between a requests subsequent attempts. -// -// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether -// a given error is retryable. By default this list of retryables includes the following: -// - Retrying errors that implement the RetryableError method, and return true. -// - Connection Errors -// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true. -// - Connection Reset Errors. -// - net.OpErr types that are dialing errors or are temporary. -// - HTTP Status Codes: 500, 502, 503, and 504. -// - API Error Codes -// - RequestTimeout, RequestTimeoutException -// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException, -// RequestThrottled, SlowDown, EC2ThrottledException -// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException -// - TransactionInProgressException, PriorRequestNotComplete -// -// The standard retryer will not retry a request in the event if the context associated with the request -// has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context -// value. -// -// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer -// using the NewStandard function, and providing one more functional argument that mutate the StandardOptions -// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions, -// and the retry delay policy. -// -// For example to modify the default retry attempts for the standard retryer: -// -// // configure the custom retryer -// customRetry := retry.NewStandard(func(o *retry.StandardOptions) { -// o.MaxAttempts = 5 -// }) -// -// // create a service client with the retryer -// s3.NewFromConfig(cfg, func(o *s3.Options) { -// o.Retryer = customRetry -// }) -// -// # Utilities -// -// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic -// way. These are: -// -// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable -// in addition to those considered retryable by the provided retryer. -// -// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping -// a retryer implementation. -// -// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a -// request by wrapping a retryer implementation. -// -// The following package functions have been provided to easily satisfy different retry interfaces to further customize -// a given retryer's behavior: -// -// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example, -// you can use this method to easily create custom back off policies to be used with the -// standard retryer. -// -// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example, -// this can be used to extend the standard retryer to add additional logic to determine if an -// error should be retried. -// -// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example, -// this can be used to extend the standard retryer to add additional logic to determine if an -// error should be considered a timeout. -package retry diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go deleted file mode 100644 index 3e432eefe77f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -package retry - -import "fmt" - -// MaxAttemptsError provides the error when the maximum number of attempts have -// been exceeded. -type MaxAttemptsError struct { - Attempt int - Err error -} - -func (e *MaxAttemptsError) Error() string { - return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err) -} - -// Unwrap returns the nested error causing the max attempts error. Provides the -// implementation for errors.Is and errors.As to unwrap nested errors. -func (e *MaxAttemptsError) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go deleted file mode 100644 index c266996dea23..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go +++ /dev/null @@ -1,49 +0,0 @@ -package retry - -import ( - "math" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/timeconv" -) - -// ExponentialJitterBackoff provides backoff delays with jitter based on the -// number of attempts. -type ExponentialJitterBackoff struct { - maxBackoff time.Duration - // precomputed number of attempts needed to reach max backoff. - maxBackoffAttempts float64 - - randFloat64 func() (float64, error) -} - -// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured -// for the max backoff. -func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff { - return &ExponentialJitterBackoff{ - maxBackoff: maxBackoff, - maxBackoffAttempts: math.Log2( - float64(maxBackoff) / float64(time.Second)), - randFloat64: rand.CryptoRandFloat64, - } -} - -// BackoffDelay returns the duration to wait before the next attempt should be -// made. Returns an error if unable get a duration. -func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { - if attempt > int(j.maxBackoffAttempts) { - return j.maxBackoff, nil - } - - b, err := j.randFloat64() - if err != nil { - return 0, err - } - - // [0.0, 1.0) * 2 ^ attempts - ri := int64(1 << uint64(attempt)) - delaySeconds := b * float64(ri) - - return timeconv.FloatSecondsDur(delaySeconds), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go deleted file mode 100644 index 7a3f18301863..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go +++ /dev/null @@ -1,52 +0,0 @@ -package retry - -import ( - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" -) - -// attemptResultsKey is a metadata accessor key to retrieve metadata -// for all request attempts. -type attemptResultsKey struct { -} - -// GetAttemptResults retrieves attempts results from middleware metadata. -func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) { - m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults) - return m, ok -} - -// AttemptResults represents struct containing metadata returned by all request attempts. -type AttemptResults struct { - - // Results is a slice consisting attempt result from all request attempts. - // Results are stored in order request attempt is made. - Results []AttemptResult -} - -// AttemptResult represents attempt result returned by a single request attempt. -type AttemptResult struct { - - // Err is the error if received for the request attempt. - Err error - - // Retryable denotes if request may be retried. This states if an - // error is considered retryable. - Retryable bool - - // Retried indicates if this request was retried. - Retried bool - - // ResponseMetadata is any existing metadata passed via the response middlewares. - ResponseMetadata middleware.Metadata -} - -// addAttemptResults adds attempt results to middleware metadata -func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) { - metadata.Set(attemptResultsKey{}, v) -} - -// GetRawResponse returns raw response recorded for the attempt result -func (a AttemptResult) GetRawResponse() interface{} { - return awsmiddle.GetRawResponse(a.ResponseMetadata) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go deleted file mode 100644 index 5549922ab81a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ /dev/null @@ -1,418 +0,0 @@ -package retry - -import ( - "context" - "errors" - "fmt" - "strconv" - "strings" - "time" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - smithymiddle "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - "github.com/aws/smithy-go/transport/http" -) - -// RequestCloner is a function that can take an input request type and clone -// the request for use in a subsequent retry attempt. -type RequestCloner func(interface{}) interface{} - -type retryMetadata struct { - AttemptNum int - AttemptTime time.Time - MaxAttempts int - AttemptClockSkew time.Duration -} - -// Attempt is a Smithy Finalize middleware that handles retry attempts using -// the provided Retryer implementation. -type Attempt struct { - // Enable the logging of retry attempts performed by the SDK. This will - // include logging retry attempts, unretryable errors, and when max - // attempts are reached. - LogAttempts bool - - // A Meter instance for recording retry-related metrics. - OperationMeter metrics.Meter - - retryer aws.RetryerV2 - requestCloner RequestCloner -} - -// define the threshold at which we will consider certain kind of errors to be probably -// caused by clock skew -const skewThreshold = 4 * time.Minute - -// NewAttemptMiddleware returns a new Attempt retry middleware. -func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { - m := &Attempt{ - retryer: wrapAsRetryerV2(retryer), - requestCloner: requestCloner, - } - for _, fn := range optFns { - fn(m) - } - if m.OperationMeter == nil { - m.OperationMeter = metrics.NopMeterProvider{}.Meter("") - } - - return m -} - -// ID returns the middleware identifier -func (r *Attempt) ID() string { return "Retry" } - -func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) { - if !r.LogAttempts { - return - } - logger.Logf(classification, format, v...) -} - -// HandleFinalize utilizes the provider Retryer implementation to attempt -// retries over the next handler -func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( - out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, -) { - var attemptNum int - var attemptClockSkew time.Duration - var attemptResults AttemptResults - - maxAttempts := r.retryer.MaxAttempts() - releaseRetryToken := nopRelease - - retryMetrics, err := newAttemptMetrics(r.OperationMeter) - if err != nil { - return out, metadata, err - } - - for { - attemptNum++ - attemptInput := in - attemptInput.Request = r.requestCloner(attemptInput.Request) - - // Record the metadata for the for attempt being started. - attemptCtx := setRetryMetadata(ctx, retryMetadata{ - AttemptNum: attemptNum, - AttemptTime: sdk.NowTime().UTC(), - MaxAttempts: maxAttempts, - AttemptClockSkew: attemptClockSkew, - }) - - // Setting clock skew to be used on other context (like signing) - ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) - - var attemptResult AttemptResult - - attemptCtx, span := tracing.StartSpan(attemptCtx, "Attempt", func(o *tracing.SpanOptions) { - o.Properties.Set("operation.attempt", attemptNum) - }) - retryMetrics.Attempts.Add(ctx, 1, withOperationMetadata(ctx)) - - start := sdk.NowTime() - out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) - elapsed := sdk.NowTime().Sub(start) - - retryMetrics.AttemptDuration.Record(ctx, float64(elapsed)/1e9, withOperationMetadata(ctx)) - if err != nil { - retryMetrics.Errors.Add(ctx, 1, withOperationMetadata(ctx), func(o *metrics.RecordMetricOptions) { - o.Properties.Set("exception.type", errorType(err)) - }) - } - - span.End() - - attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) - - // AttemptResult Retried states that the attempt was not successful, and - // should be retried. - shouldRetry := attemptResult.Retried - - // Add attempt metadata to list of all attempt metadata - attemptResults.Results = append(attemptResults.Results, attemptResult) - - if !shouldRetry { - // Ensure the last response's metadata is used as the bases for result - // metadata returned by the stack. The Slice of attempt results - // will be added to this cloned metadata. - metadata = attemptResult.ResponseMetadata.Clone() - - break - } - } - - addAttemptResults(&metadata, attemptResults) - return out, metadata, err -} - -// handleAttempt handles an individual request attempt. -func (r *Attempt) handleAttempt( - ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler, -) ( - out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error, -) { - defer func() { - attemptResult.Err = err - }() - - // Short circuit if this attempt never can succeed because the context is - // canceled. This reduces the chance of token pools being modified for - // attempts that will not be made - select { - case <-ctx.Done(): - return out, attemptResult, nopRelease, ctx.Err() - default: - } - - //------------------------------ - // Get Attempt Token - //------------------------------ - releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx) - if err != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to get retry Send token, %w", err) - } - - //------------------------------ - // Send Attempt - //------------------------------ - logger := smithymiddle.GetLogger(ctx) - service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx) - retryMetadata, _ := getRetryMetadata(ctx) - attemptNum := retryMetadata.AttemptNum - maxAttempts := retryMetadata.MaxAttempts - - // Following attempts must ensure the request payload stream starts in a - // rewound state. - if attemptNum > 1 { - if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok { - if rewindErr := rewindable.RewindStream(); rewindErr != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to rewind transport stream for retry, %w", rewindErr) - } - } - - r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d", - service, operation, attemptNum) - } - - var metadata smithymiddle.Metadata - out, metadata, err = next.HandleFinalize(ctx, in) - attemptResult.ResponseMetadata = metadata - - //------------------------------ - // Bookkeeping - //------------------------------ - // Release the retry token based on the state of the attempt's error (if any). - if releaseError := releaseRetryToken(err); releaseError != nil && err != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to release retry token after request error, %w", err) - } - // Release the attempt token based on the state of the attempt's error (if any). - if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to release initial token after request error, %w", err) - } - // If there was no error making the attempt, nothing further to do. There - // will be nothing to retry. - if err == nil { - return out, attemptResult, nopRelease, err - } - - err = wrapAsClockSkew(ctx, err) - - //------------------------------ - // Is Retryable and Should Retry - //------------------------------ - // If the attempt failed with an unretryable error, nothing further to do - // but return, and inform the caller about the terminal failure. - retryable := r.retryer.IsErrorRetryable(err) - if !retryable { - r.logf(logger, logging.Debug, "request failed with unretryable error %v", err) - return out, attemptResult, nopRelease, err - } - - // set retryable to true - attemptResult.Retryable = true - - // Once the maximum number of attempts have been exhausted there is nothing - // further to do other than inform the caller about the terminal failure. - if maxAttempts > 0 && attemptNum >= maxAttempts { - r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts) - err = &MaxAttemptsError{ - Attempt: attemptNum, - Err: err, - } - return out, attemptResult, nopRelease, err - } - - //------------------------------ - // Get Retry (aka Retry Quota) Token - //------------------------------ - // Get a retry token that will be released after the - releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err) - if retryTokenErr != nil { - return out, attemptResult, nopRelease, errors.Join(err, retryTokenErr) - } - - //------------------------------ - // Retry Delay and Sleep - //------------------------------ - // Get the retry delay before another attempt can be made, and sleep for - // that time. Potentially early exist if the sleep is canceled via the - // context. - retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err) - if reqErr != nil { - return out, attemptResult, releaseRetryToken, reqErr - } - if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil { - err = &aws.RequestCanceledError{Err: reqErr} - return out, attemptResult, releaseRetryToken, err - } - - // The request should be re-attempted. - attemptResult.Retried = true - - return out, attemptResult, releaseRetryToken, err -} - -// errors that, if detected when we know there's a clock skew, -// can be retried and have a high chance of success -var possibleSkewCodes = map[string]struct{}{ - "InvalidSignatureException": {}, - "SignatureDoesNotMatch": {}, - "AuthFailure": {}, -} - -var definiteSkewCodes = map[string]struct{}{ - "RequestExpired": {}, - "RequestInTheFuture": {}, - "RequestTimeTooSkewed": {}, -} - -// wrapAsClockSkew checks if this error could be related to a clock skew -// error and if so, wrap the error. -func wrapAsClockSkew(ctx context.Context, err error) error { - var v interface{ ErrorCode() string } - if !errors.As(err, &v) { - return err - } - if _, ok := definiteSkewCodes[v.ErrorCode()]; ok { - return &retryableClockSkewError{Err: err} - } - _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()] - if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode { - return &retryableClockSkewError{Err: err} - } - return err -} - -// MetricsHeader attaches SDK request metric header for retries to the transport -type MetricsHeader struct{} - -// ID returns the middleware identifier -func (r *MetricsHeader) ID() string { - return "RetryMetricsHeader" -} - -// HandleFinalize attaches the SDK request metric header to the transport layer -func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( - out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, -) { - retryMetadata, _ := getRetryMetadata(ctx) - - const retryMetricHeader = "Amz-Sdk-Request" - var parts []string - - parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum)) - if retryMetadata.MaxAttempts != 0 { - parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts)) - } - - var ttl time.Time - if deadline, ok := ctx.Deadline(); ok { - ttl = deadline - } - - // Only append the TTL if it can be determined. - if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { - const unixTimeFormat = "20060102T150405Z" - ttl = ttl.Add(retryMetadata.AttemptClockSkew) - parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) - } - - switch req := in.Request.(type) { - case *http.Request: - req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) - default: - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - return next.HandleFinalize(ctx, in) -} - -type retryMetadataKey struct{} - -// getRetryMetadata retrieves retryMetadata from the context and a bool -// indicating if it was set. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { - metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) - return metadata, ok -} - -// setRetryMetadata sets the retryMetadata on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { - return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) -} - -// AddRetryMiddlewaresOptions is the set of options that can be passed to -// AddRetryMiddlewares for configuring retry associated middleware. -type AddRetryMiddlewaresOptions struct { - Retryer aws.Retryer - - // Enable the logging of retry attempts performed by the SDK. This will - // include logging retry attempts, unretryable errors, and when max - // attempts are reached. - LogRetryAttempts bool -} - -// AddRetryMiddlewares adds retry middleware to operation middleware stack -func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { - attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { - middleware.LogAttempts = options.LogRetryAttempts - }) - - // index retry to before signing, if signing exists - if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil { - return err - } - - if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil { - return err - } - return nil -} - -// Determines the value of exception.type for metrics purposes. We prefer an -// API-specific error code, otherwise it's just the Go type for the value. -func errorType(err error) string { - var terr smithy.APIError - if errors.As(err, &terr) { - return terr.ErrorCode() - } - return fmt.Sprintf("%T", err) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go deleted file mode 100644 index af81635b3fdd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go +++ /dev/null @@ -1,90 +0,0 @@ -package retry - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// AddWithErrorCodes returns a Retryer with additional error codes considered -// for determining if the error should be retried. -func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer { - retryable := &RetryableErrorCode{ - Codes: map[string]struct{}{}, - } - for _, c := range codes { - retryable.Codes[c] = struct{}{} - } - - return &withIsErrorRetryable{ - RetryerV2: wrapAsRetryerV2(r), - Retryable: retryable, - } -} - -type withIsErrorRetryable struct { - aws.RetryerV2 - Retryable IsErrorRetryable -} - -func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool { - if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary { - return v.Bool() - } - return r.RetryerV2.IsErrorRetryable(err) -} - -// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value -// specified. -func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer { - return &withMaxAttempts{ - RetryerV2: wrapAsRetryerV2(r), - Max: max, - } -} - -type withMaxAttempts struct { - aws.RetryerV2 - Max int -} - -func (w *withMaxAttempts) MaxAttempts() int { - return w.Max -} - -// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer -// overriding the RetryDelay behavior for a alternate minimum initial backoff -// delay. -func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer { - return &withMaxBackoffDelay{ - RetryerV2: wrapAsRetryerV2(r), - backoff: NewExponentialJitterBackoff(delay), - } -} - -type withMaxBackoffDelay struct { - aws.RetryerV2 - backoff *ExponentialJitterBackoff -} - -func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) { - return r.backoff.BackoffDelay(attempt, err) -} - -type wrappedAsRetryerV2 struct { - aws.Retryer -} - -func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 { - v, ok := r.(aws.RetryerV2) - if !ok { - v = wrappedAsRetryerV2{Retryer: r} - } - - return v -} - -func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) { - return w.Retryer.GetInitialToken(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go deleted file mode 100644 index 1b485f99884a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ /dev/null @@ -1,228 +0,0 @@ -package retry - -import ( - "errors" - "fmt" - "net" - "net/url" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// IsErrorRetryable provides the interface of an implementation to determine if -// a error as the result of an operation is retryable. -type IsErrorRetryable interface { - IsErrorRetryable(error) aws.Ternary -} - -// IsErrorRetryables is a collection of checks to determine of the error is -// retryable. Iterates through the checks and returns the state of retryable -// if any check returns something other than unknown. -type IsErrorRetryables []IsErrorRetryable - -// IsErrorRetryable returns if the error is retryable if any of the checks in -// the list return a value other than unknown. -func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary { - for _, re := range r { - if v := re.IsErrorRetryable(err); v != aws.UnknownTernary { - return v - } - } - return aws.UnknownTernary -} - -// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface. -type IsErrorRetryableFunc func(error) aws.Ternary - -// IsErrorRetryable returns if the error is retryable. -func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary { - return fn(err) -} - -// RetryableError is an IsErrorRetryable implementation which uses the -// optional interface Retryable on the error value to determine if the error is -// retryable. -type RetryableError struct{} - -// IsErrorRetryable returns if the error is retryable if it satisfies the -// Retryable interface, and returns if the attempt should be retried. -func (RetryableError) IsErrorRetryable(err error) aws.Ternary { - var v interface{ RetryableError() bool } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - return aws.BoolTernary(v.RetryableError()) -} - -// NoRetryCanceledError detects if the error was an request canceled error and -// returns if so. -type NoRetryCanceledError struct{} - -// IsErrorRetryable returns the error is not retryable if the request was -// canceled. -func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary { - var v interface{ CanceledError() bool } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - if v.CanceledError() { - return aws.FalseTernary - } - return aws.UnknownTernary -} - -// RetryableConnectionError determines if the underlying error is an HTTP -// connection and returns if it should be retried. -// -// Includes errors such as connection reset, connection refused, net dial, -// temporary, and timeout errors. -type RetryableConnectionError struct{} - -// IsErrorRetryable returns if the error is caused by and HTTP connection -// error, and should be retried. -func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { - if err == nil { - return aws.UnknownTernary - } - var retryable bool - - var conErr interface{ ConnectionError() bool } - var tempErr interface{ Temporary() bool } - var timeoutErr interface{ Timeout() bool } - var urlErr *url.Error - var netOpErr *net.OpError - var dnsError *net.DNSError - - if errors.As(err, &dnsError) { - // NXDOMAIN errors should not be retried - if dnsError.IsNotFound { - return aws.BoolTernary(false) - } - - // if !dnsError.Temporary(), error may or may not be temporary, - // (i.e. !Temporary() =/=> !retryable) so we should fall through to - // remaining checks - if dnsError.Temporary() { - return aws.BoolTernary(true) - } - } - - switch { - case errors.As(err, &conErr) && conErr.ConnectionError(): - retryable = true - - case strings.Contains(err.Error(), "use of closed network connection"): - fallthrough - case strings.Contains(err.Error(), "connection reset"): - // The errors "connection reset" and "use of closed network connection" - // are effectively the same. It appears to be the difference between - // sync and async read of TCP RST in the stdlib's net.Conn read loop. - // see #2737 - retryable = true - - case errors.As(err, &urlErr): - // Refused connections should be retried as the service may not yet be - // running on the port. Go TCP dial considers refused connections as - // not temporary. - if strings.Contains(urlErr.Error(), "connection refused") { - retryable = true - } else { - return r.IsErrorRetryable(errors.Unwrap(urlErr)) - } - - case errors.As(err, &netOpErr): - // Network dial, or temporary network errors are always retryable. - if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() { - retryable = true - } else { - return r.IsErrorRetryable(errors.Unwrap(netOpErr)) - } - - case errors.As(err, &tempErr) && tempErr.Temporary(): - // Fallback to the generic temporary check, with temporary errors - // retryable. - retryable = true - - case errors.As(err, &timeoutErr) && timeoutErr.Timeout(): - // Fallback to the generic timeout check, with timeout errors - // retryable. - retryable = true - - default: - return aws.UnknownTernary - } - - return aws.BoolTernary(retryable) - -} - -// RetryableHTTPStatusCode provides a IsErrorRetryable based on HTTP status -// codes. -type RetryableHTTPStatusCode struct { - Codes map[int]struct{} -} - -// IsErrorRetryable return if the passed in error is retryable based on the -// HTTP status code. -func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary { - var v interface{ HTTPStatusCode() int } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - _, ok := r.Codes[v.HTTPStatusCode()] - if !ok { - return aws.UnknownTernary - } - - return aws.TrueTernary -} - -// RetryableErrorCode determines if an attempt should be retried based on the -// API error code. -type RetryableErrorCode struct { - Codes map[string]struct{} -} - -// IsErrorRetryable return if the error is retryable based on the error codes. -// Returns unknown if the error doesn't have a code or it is unknown. -func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { - var v interface{ ErrorCode() string } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - _, ok := r.Codes[v.ErrorCode()] - if !ok { - return aws.UnknownTernary - } - - return aws.TrueTernary -} - -// retryableClockSkewError marks errors that can be caused by clock skew -// (difference between server time and client time). -// This is returned when there's certain confidence that adjusting the client time -// could allow a retry to succeed -type retryableClockSkewError struct{ Err error } - -func (e *retryableClockSkewError) Error() string { - return fmt.Sprintf("Probable clock skew error: %v", e.Err) -} - -// Unwrap returns the wrapped error. -func (e *retryableClockSkewError) Unwrap() error { - return e.Err -} - -// RetryableError allows the retryer to retry this request -func (e *retryableClockSkewError) RetryableError() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go deleted file mode 100644 index d5ea93222ed1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go +++ /dev/null @@ -1,269 +0,0 @@ -package retry - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws/ratelimit" -) - -// BackoffDelayer provides the interface for determining the delay to before -// another request attempt, that previously failed. -type BackoffDelayer interface { - BackoffDelay(attempt int, err error) (time.Duration, error) -} - -// BackoffDelayerFunc provides a wrapper around a function to determine the -// backoff delay of an attempt retry. -type BackoffDelayerFunc func(int, error) (time.Duration, error) - -// BackoffDelay returns the delay before attempt to retry a request. -func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) { - return fn(attempt, err) -} - -const ( - // DefaultMaxAttempts is the maximum of attempts for an API request - DefaultMaxAttempts int = 3 - - // DefaultMaxBackoff is the maximum back off delay between attempts - DefaultMaxBackoff time.Duration = 20 * time.Second -) - -// Default retry token quota values. -const ( - DefaultRetryRateTokens uint = 500 - DefaultRetryCost uint = 5 - DefaultRetryTimeoutCost uint = 10 - DefaultNoRetryIncrement uint = 1 -) - -// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK -// should consider as retryable errors. -var DefaultRetryableHTTPStatusCodes = map[int]struct{}{ - 500: {}, - 502: {}, - 503: {}, - 504: {}, -} - -// DefaultRetryableErrorCodes provides the set of API error codes that should -// be retried. -var DefaultRetryableErrorCodes = map[string]struct{}{ - "RequestTimeout": {}, - "RequestTimeoutException": {}, -} - -// DefaultThrottleErrorCodes provides the set of API error codes that are -// considered throttle errors. -var DefaultThrottleErrorCodes = map[string]struct{}{ - "Throttling": {}, - "ThrottlingException": {}, - "ThrottledException": {}, - "RequestThrottledException": {}, - "TooManyRequestsException": {}, - "ProvisionedThroughputExceededException": {}, - "TransactionInProgressException": {}, - "RequestLimitExceeded": {}, - "BandwidthLimitExceeded": {}, - "LimitExceededException": {}, - "RequestThrottled": {}, - "SlowDown": {}, - "PriorRequestNotComplete": {}, - "EC2ThrottledException": {}, -} - -// DefaultRetryables provides the set of retryable checks that are used by -// default. -var DefaultRetryables = []IsErrorRetryable{ - NoRetryCanceledError{}, - RetryableError{}, - RetryableConnectionError{}, - RetryableHTTPStatusCode{ - Codes: DefaultRetryableHTTPStatusCodes, - }, - RetryableErrorCode{ - Codes: DefaultRetryableErrorCodes, - }, - RetryableErrorCode{ - Codes: DefaultThrottleErrorCodes, - }, -} - -// DefaultTimeouts provides the set of timeout checks that are used by default. -var DefaultTimeouts = []IsErrorTimeout{ - TimeouterError{}, -} - -// StandardOptions provides the functional options for configuring the standard -// retryable, and delay behavior. -type StandardOptions struct { - // Maximum number of attempts that should be made. - MaxAttempts int - - // MaxBackoff duration between retried attempts. - MaxBackoff time.Duration - - // Provides the backoff strategy the retryer will use to determine the - // delay between retry attempts. - Backoff BackoffDelayer - - // Set of strategies to determine if the attempt should be retried based on - // the error response received. - // - // It is safe to append to this list in NewStandard's functional options. - Retryables []IsErrorRetryable - - // Set of strategies to determine if the attempt failed due to a timeout - // error. - // - // It is safe to append to this list in NewStandard's functional options. - Timeouts []IsErrorTimeout - - // Provides the rate limiting strategy for rate limiting attempt retries - // across all attempts the retryer is being used with. - // - // A RateLimiter operates as a token bucket with a set capacity, where - // attempt failures events consume tokens. A retry attempt that attempts to - // consume more tokens than what's available results in operation failure. - // The default implementation is parameterized as follows: - // - a capacity of 500 (DefaultRetryRateTokens) - // - a retry caused by a timeout costs 10 tokens (DefaultRetryCost) - // - a retry caused by other errors costs 5 tokens (DefaultRetryTimeoutCost) - // - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement) - // - // You can disable rate limiting by setting this field to ratelimit.None. - RateLimiter RateLimiter - - // The cost to deduct from the RateLimiter's token bucket per retry. - RetryCost uint - - // The cost to deduct from the RateLimiter's token bucket per retry caused - // by timeout error. - RetryTimeoutCost uint - - // The cost to payback to the RateLimiter's token bucket for successful - // attempts. - NoRetryIncrement uint -} - -// RateLimiter provides the interface for limiting the rate of attempt retries -// allowed by the retryer. -type RateLimiter interface { - GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error) - AddTokens(uint) error -} - -// Standard is the standard retry pattern for the SDK. It uses a set of -// retryable checks to determine of the failed attempt should be retried, and -// what retry delay should be used. -type Standard struct { - options StandardOptions - - timeout IsErrorTimeout - retryable IsErrorRetryable - backoff BackoffDelayer -} - -// NewStandard initializes a standard retry behavior with defaults that can be -// overridden via functional options. -func NewStandard(fnOpts ...func(*StandardOptions)) *Standard { - o := StandardOptions{ - MaxAttempts: DefaultMaxAttempts, - MaxBackoff: DefaultMaxBackoff, - Retryables: append([]IsErrorRetryable{}, DefaultRetryables...), - Timeouts: append([]IsErrorTimeout{}, DefaultTimeouts...), - - RateLimiter: ratelimit.NewTokenRateLimit(DefaultRetryRateTokens), - RetryCost: DefaultRetryCost, - RetryTimeoutCost: DefaultRetryTimeoutCost, - NoRetryIncrement: DefaultNoRetryIncrement, - } - for _, fn := range fnOpts { - fn(&o) - } - if o.MaxAttempts <= 0 { - o.MaxAttempts = DefaultMaxAttempts - } - - backoff := o.Backoff - if backoff == nil { - backoff = NewExponentialJitterBackoff(o.MaxBackoff) - } - - return &Standard{ - options: o, - backoff: backoff, - retryable: IsErrorRetryables(o.Retryables), - timeout: IsErrorTimeouts(o.Timeouts), - } -} - -// MaxAttempts returns the maximum number of attempts that can be made for a -// request before failing. -func (s *Standard) MaxAttempts() int { - return s.options.MaxAttempts -} - -// IsErrorRetryable returns if the error is can be retried or not. Should not -// consider the number of attempts made. -func (s *Standard) IsErrorRetryable(err error) bool { - return s.retryable.IsErrorRetryable(err).Bool() -} - -// RetryDelay returns the delay to use before another request attempt is made. -func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) { - return s.backoff.BackoffDelay(attempt, err) -} - -// GetAttemptToken returns the token to be released after then attempt completes. -// The release token will add NoRetryIncrement to the RateLimiter token pool if -// the attempt was successful. If the attempt failed, nothing will be done. -func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) { - return s.GetInitialToken(), nil -} - -// GetInitialToken returns a token for adding the NoRetryIncrement to the -// RateLimiter token if the attempt completed successfully without error. -// -// InitialToken applies to result of the each attempt, including the first. -// Whereas the RetryToken applies to the result of subsequent attempts. -// -// Deprecated: use GetAttemptToken instead. -func (s *Standard) GetInitialToken() func(error) error { - return releaseToken(s.noRetryIncrement).release -} - -func (s *Standard) noRetryIncrement() error { - return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement) -} - -// GetRetryToken attempts to deduct the retry cost from the retry token pool. -// Returning the token release function, or error. -func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) { - cost := s.options.RetryCost - - if s.timeout.IsErrorTimeout(opErr).Bool() { - cost = s.options.RetryTimeoutCost - } - - fn, err := s.options.RateLimiter.GetToken(ctx, cost) - if err != nil { - return nil, fmt.Errorf("failed to get rate limit token, %w", err) - } - - return releaseToken(fn).release, nil -} - -func nopRelease(error) error { return nil } - -type releaseToken func() error - -func (f releaseToken) release(err error) error { - if err != nil { - return nil - } - - return f() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go deleted file mode 100644 index c4b844d15f19..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go +++ /dev/null @@ -1,60 +0,0 @@ -package retry - -import ( - "errors" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// IsErrorThrottle provides the interface of an implementation to determine if -// a error response from an operation is a throttling error. -type IsErrorThrottle interface { - IsErrorThrottle(error) aws.Ternary -} - -// IsErrorThrottles is a collection of checks to determine of the error a -// throttle error. Iterates through the checks and returns the state of -// throttle if any check returns something other than unknown. -type IsErrorThrottles []IsErrorThrottle - -// IsErrorThrottle returns if the error is a throttle error if any of the -// checks in the list return a value other than unknown. -func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary { - for _, re := range r { - if v := re.IsErrorThrottle(err); v != aws.UnknownTernary { - return v - } - } - return aws.UnknownTernary -} - -// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface. -type IsErrorThrottleFunc func(error) aws.Ternary - -// IsErrorThrottle returns if the error is a throttle error. -func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary { - return fn(err) -} - -// ThrottleErrorCode determines if an attempt should be retried based on the -// API error code. -type ThrottleErrorCode struct { - Codes map[string]struct{} -} - -// IsErrorThrottle return if the error is a throttle error based on the error -// codes. Returns unknown if the error doesn't have a code or it is unknown. -func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary { - var v interface{ ErrorCode() string } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - _, ok := r.Codes[v.ErrorCode()] - if !ok { - return aws.UnknownTernary - } - - return aws.TrueTernary -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go deleted file mode 100644 index 3d47870d2dc2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go +++ /dev/null @@ -1,52 +0,0 @@ -package retry - -import ( - "errors" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// IsErrorTimeout provides the interface of an implementation to determine if -// a error matches. -type IsErrorTimeout interface { - IsErrorTimeout(err error) aws.Ternary -} - -// IsErrorTimeouts is a collection of checks to determine of the error is -// retryable. Iterates through the checks and returns the state of retryable -// if any check returns something other than unknown. -type IsErrorTimeouts []IsErrorTimeout - -// IsErrorTimeout returns if the error is retryable if any of the checks in -// the list return a value other than unknown. -func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary { - for _, t := range ts { - if v := t.IsErrorTimeout(err); v != aws.UnknownTernary { - return v - } - } - return aws.UnknownTernary -} - -// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface. -type IsErrorTimeoutFunc func(error) aws.Ternary - -// IsErrorTimeout returns if the error is retryable. -func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary { - return fn(err) -} - -// TimeouterError provides the IsErrorTimeout implementation for determining if -// an error is a timeout based on type with the Timeout method. -type TimeouterError struct{} - -// IsErrorTimeout returns if the error is a timeout error. -func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary { - var v interface{ Timeout() bool } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - return aws.BoolTernary(v.Timeout()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go deleted file mode 100644 index b0ba4cb2f08d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go +++ /dev/null @@ -1,127 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "time" -) - -// RetryMode provides the mode the API client will use to create a retryer -// based on. -type RetryMode string - -const ( - // RetryModeStandard model provides rate limited retry attempts with - // exponential backoff delay. - RetryModeStandard RetryMode = "standard" - - // RetryModeAdaptive model provides attempt send rate limiting on throttle - // responses in addition to standard mode's retry rate limiting. - // - // Adaptive retry mode is experimental and is subject to change in the - // future. - RetryModeAdaptive RetryMode = "adaptive" -) - -// ParseRetryMode attempts to parse a RetryMode from the given string. -// Returning error if the value is not a known RetryMode. -func ParseRetryMode(v string) (mode RetryMode, err error) { - switch v { - case "standard": - return RetryModeStandard, nil - case "adaptive": - return RetryModeAdaptive, nil - default: - return mode, fmt.Errorf("unknown RetryMode, %v", v) - } -} - -func (m RetryMode) String() string { return string(m) } - -// Retryer is an interface to determine if a given error from a -// attempt should be retried, and if so what backoff delay to apply. The -// default implementation used by most services is the retry package's Standard -// type. Which contains basic retry logic using exponential backoff. -type Retryer interface { - // IsErrorRetryable returns if the failed attempt is retryable. This check - // should determine if the error can be retried, or if the error is - // terminal. - IsErrorRetryable(error) bool - - // MaxAttempts returns the maximum number of attempts that can be made for - // an attempt before failing. A value of 0 implies that the attempt should - // be retried until it succeeds if the errors are retryable. - MaxAttempts() int - - // RetryDelay returns the delay that should be used before retrying the - // attempt. Will return error if the delay could not be determined. - RetryDelay(attempt int, opErr error) (time.Duration, error) - - // GetRetryToken attempts to deduct the retry cost from the retry token pool. - // Returning the token release function, or error. - GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error) - - // GetInitialToken returns the initial attempt token that can increment the - // retry token pool if the attempt is successful. - GetInitialToken() (releaseToken func(error) error) -} - -// RetryerV2 is an interface to determine if a given error from an attempt -// should be retried, and if so what backoff delay to apply. The default -// implementation used by most services is the retry package's Standard type. -// Which contains basic retry logic using exponential backoff. -// -// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken -// method in favor of GetAttemptToken which takes a context, and can return an error. -// -// The SDK's retry package's Attempt middleware, and utilities will always -// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if -// GetAttemptToken is not implemented. -type RetryerV2 interface { - Retryer - - // GetInitialToken returns the initial attempt token that can increment the - // retry token pool if the attempt is successful. - // - // Deprecated: This method does not provide a way to block using Context, - // nor can it return an error. Use RetryerV2, and GetAttemptToken instead. - GetInitialToken() (releaseToken func(error) error) - - // GetAttemptToken returns the send token that can be used to rate limit - // attempt calls. Will be used by the SDK's retry package's Attempt - // middleware to get a send token prior to calling the temp and releasing - // the send token after the attempt has been made. - GetAttemptToken(context.Context) (func(error) error, error) -} - -// NopRetryer provides a RequestRetryDecider implementation that will flag -// all attempt errors as not retryable, with a max attempts of 1. -type NopRetryer struct{} - -// IsErrorRetryable returns false for all error values. -func (NopRetryer) IsErrorRetryable(error) bool { return false } - -// MaxAttempts always returns 1 for the original attempt. -func (NopRetryer) MaxAttempts() int { return 1 } - -// RetryDelay is not valid for the NopRetryer. Will always return error. -func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { - return 0, fmt.Errorf("not retrying any attempt errors") -} - -// GetRetryToken returns a stub function that does nothing. -func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) { - return nopReleaseToken, nil -} - -// GetInitialToken returns a stub function that does nothing. -func (NopRetryer) GetInitialToken() func(error) error { - return nopReleaseToken -} - -// GetAttemptToken returns a stub function that does nothing. -func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) { - return nopReleaseToken, nil -} - -func nopReleaseToken(error) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go deleted file mode 100644 index 3af9b2b33614..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go +++ /dev/null @@ -1,14 +0,0 @@ -package aws - -// ExecutionEnvironmentID is the AWS execution environment runtime identifier. -type ExecutionEnvironmentID string - -// RuntimeEnvironment is a collection of values that are determined at runtime -// based on the environment that the SDK is executing in. Some of these values -// may or may not be present based on the executing environment and certain SDK -// configuration properties that drive whether these values are populated.. -type RuntimeEnvironment struct { - EnvironmentIdentifier ExecutionEnvironmentID - Region string - EC2InstanceMetadataRegion string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go deleted file mode 100644 index cbf22f1d0b0c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go +++ /dev/null @@ -1,115 +0,0 @@ -package v4 - -import ( - "strings" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -func lookupKey(service, region string) string { - var s strings.Builder - s.Grow(len(region) + len(service) + 3) - s.WriteString(region) - s.WriteRune('/') - s.WriteString(service) - return s.String() -} - -type derivedKey struct { - AccessKey string - Date time.Time - Credential []byte -} - -type derivedKeyCache struct { - values map[string]derivedKey - mutex sync.RWMutex -} - -func newDerivedKeyCache() derivedKeyCache { - return derivedKeyCache{ - values: make(map[string]derivedKey), - } -} - -func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { - key := lookupKey(service, region) - s.mutex.RLock() - if cred, ok := s.get(key, credentials, signingTime.Time); ok { - s.mutex.RUnlock() - return cred - } - s.mutex.RUnlock() - - s.mutex.Lock() - if cred, ok := s.get(key, credentials, signingTime.Time); ok { - s.mutex.Unlock() - return cred - } - cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) - entry := derivedKey{ - AccessKey: credentials.AccessKeyID, - Date: signingTime.Time, - Credential: cred, - } - s.values[key] = entry - s.mutex.Unlock() - - return cred -} - -func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { - cacheEntry, ok := s.retrieveFromCache(key) - if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { - return cacheEntry.Credential, true - } - return nil, false -} - -func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { - if v, ok := s.values[key]; ok { - return v, true - } - return derivedKey{}, false -} - -// SigningKeyDeriver derives a signing key from a set of credentials -type SigningKeyDeriver struct { - cache derivedKeyCache -} - -// NewSigningKeyDeriver returns a new SigningKeyDeriver -func NewSigningKeyDeriver() *SigningKeyDeriver { - return &SigningKeyDeriver{ - cache: newDerivedKeyCache(), - } -} - -// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. -func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { - return k.cache.Get(credential, service, region, signingTime) -} - -func deriveKey(secret, service, region string, t SigningTime) []byte { - hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) - hmacRegion := HMACSHA256(hmacDate, []byte(region)) - hmacService := HMACSHA256(hmacRegion, []byte(service)) - return HMACSHA256(hmacService, []byte("aws4_request")) -} - -func isSameDay(x, y time.Time) bool { - xYear, xMonth, xDay := x.Date() - yYear, yMonth, yDay := y.Date() - - if xYear != yYear { - return false - } - - if xMonth != yMonth { - return false - } - - return xDay == yDay -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go deleted file mode 100644 index a23cb003bf77..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go +++ /dev/null @@ -1,40 +0,0 @@ -package v4 - -// Signature Version 4 (SigV4) Constants -const ( - // EmptyStringSHA256 is the hex encoded sha256 value of an empty string - EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` - - // UnsignedPayload indicates that the request payload body is unsigned - UnsignedPayload = "UNSIGNED-PAYLOAD" - - // AmzAlgorithmKey indicates the signing algorithm - AmzAlgorithmKey = "X-Amz-Algorithm" - - // AmzSecurityTokenKey indicates the security token to be used with temporary credentials - AmzSecurityTokenKey = "X-Amz-Security-Token" - - // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' - AmzDateKey = "X-Amz-Date" - - // AmzCredentialKey is the access key ID and credential scope - AmzCredentialKey = "X-Amz-Credential" - - // AmzSignedHeadersKey is the set of headers signed for the request - AmzSignedHeadersKey = "X-Amz-SignedHeaders" - - // AmzSignatureKey is the query parameter to store the SigV4 signature - AmzSignatureKey = "X-Amz-Signature" - - // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter - TimeFormat = "20060102T150405Z" - - // ShortTimeFormat is the shorten time format used in the credential scope - ShortTimeFormat = "20060102" - - // ContentSHAKey is the SHA256 of request body - ContentSHAKey = "X-Amz-Content-Sha256" - - // StreamingEventsPayload indicates that the request payload body is a signed event stream. - StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go deleted file mode 100644 index c61955ad5b9b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings" -) - -// Rules houses a set of Rule needed for validation of a -// string value -type Rules []Rule - -// Rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that Rule -type Rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r Rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// MapRule generic Rule for maps -type MapRule map[string]struct{} - -// IsValid for the map Rule satisfies whether it exists in the map -func (m MapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// AllowList is a generic Rule for include listing -type AllowList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (w AllowList) IsValid(value string) bool { - return w.Rule.IsValid(value) -} - -// ExcludeList is a generic Rule for exclude listing -type ExcludeList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (b ExcludeList) IsValid(value string) bool { - return !b.Rule.IsValid(value) -} - -// Patterns is a list of strings to match against -type Patterns []string - -// IsValid for Patterns checks each pattern and returns if a match has -// been found -func (p Patterns) IsValid(value string) bool { - for _, pattern := range p { - if sdkstrings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// InclusiveRules rules allow for rules to depend on one another -type InclusiveRules []Rule - -// IsValid will return true if all rules are true -func (r InclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go deleted file mode 100644 index d99b32ceb072..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ /dev/null @@ -1,70 +0,0 @@ -package v4 - -// IgnoredHeaders is a list of headers that are ignored during signing -var IgnoredHeaders = Rules{ - ExcludeList{ - MapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - "Expect": struct{}{}, - "Transfer-Encoding": struct{}{}, - }, - }, -} - -// RequiredSignedHeaders is a allow list for Build canonical headers. -var RequiredSignedHeaders = Rules{ - AllowList{ - MapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Context": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - "X-Amz-Tagging": struct{}{}, - }, - }, - Patterns{"X-Amz-Object-Lock-"}, - Patterns{"X-Amz-Meta-"}, -} - -// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value -// represents whether or not it is a pattern. -var AllowedQueryHoisting = InclusiveRules{ - ExcludeList{RequiredSignedHeaders}, - Patterns{"X-Amz-"}, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go deleted file mode 100644 index e7fa7a1b1e60..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" -) - -// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. -func HMACSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go deleted file mode 100644 index bf93659a43f3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go +++ /dev/null @@ -1,75 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go deleted file mode 100644 index fc7887909e29..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import "strings" - -// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope -func BuildCredentialScope(signingTime SigningTime, region, service string) string { - return strings.Join([]string{ - signingTime.ShortTimeFormat(), - region, - service, - "aws4_request", - }, "/") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go deleted file mode 100644 index 1de06a765d1b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go +++ /dev/null @@ -1,36 +0,0 @@ -package v4 - -import "time" - -// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. -type SigningTime struct { - time.Time - timeFormat string - shortTimeFormat string -} - -// NewSigningTime creates a new SigningTime given a time.Time -func NewSigningTime(t time.Time) SigningTime { - return SigningTime{ - Time: t, - } -} - -// TimeFormat provides a time formatted in the X-Amz-Date format. -func (m *SigningTime) TimeFormat() string { - return m.format(&m.timeFormat, TimeFormat) -} - -// ShortTimeFormat provides a time formatted of 20060102. -func (m *SigningTime) ShortTimeFormat() string { - return m.format(&m.shortTimeFormat, ShortTimeFormat) -} - -func (m *SigningTime) format(target *string, format string) string { - if len(*target) > 0 { - return *target - } - v := m.Time.Format(format) - *target = v - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go deleted file mode 100644 index d025dbaa0605..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go +++ /dev/null @@ -1,80 +0,0 @@ -package v4 - -import ( - "net/url" - "strings" -) - -const doubleSpace = " " - -// StripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func StripExcessSpaces(str string) string { - var j, k, l, m, spaces int - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - return str - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - return string(buf[:m]) -} - -// GetURIPath returns the escaped URI component from the provided URL. -func GetURIPath(u *url.URL) string { - var uriPath string - - if len(u.Opaque) > 0 { - const schemeSep, pathSep, queryStart = "//", "/", "?" - - opaque := u.Opaque - // Cut off the query string if present. - if idx := strings.Index(opaque, queryStart); idx >= 0 { - opaque = opaque[:idx] - } - - // Cutout the scheme separator if present. - if strings.Index(opaque, schemeSep) == 0 { - opaque = opaque[len(schemeSep):] - } - - // capture URI path starting with first path separator. - if idx := strings.Index(opaque, pathSep); idx >= 0 { - uriPath = opaque[idx:] - } - } else { - uriPath = u.EscapedPath() - } - - if len(uriPath) == 0 { - uriPath = "/" - } - - return uriPath -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go deleted file mode 100644 index 8a46220a37bb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ /dev/null @@ -1,420 +0,0 @@ -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const computePayloadHashMiddlewareID = "ComputePayloadHash" - -// HashComputationError indicates an error occurred while computing the signing hash -type HashComputationError struct { - Err error -} - -// Error is the error message -func (e *HashComputationError) Error() string { - return fmt.Sprintf("failed to compute payload hash: %v", e.Err) -} - -// Unwrap returns the underlying error if one is set -func (e *HashComputationError) Unwrap() error { - return e.Err -} - -// SigningError indicates an error condition occurred while performing SigV4 signing -type SigningError struct { - Err error -} - -func (e *SigningError) Error() string { - return fmt.Sprintf("failed to sign request: %v", e.Err) -} - -// Unwrap returns the underlying error cause -func (e *SigningError) Unwrap() error { - return e.Err -} - -// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that -// switches between unsigned and signed payload based on TLS state for request. -// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth. -// By default, SDK uses this middleware for known AWS APIs that support such TLS based auth selection . -// -// Usage example - -// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to -// dynamically switch between unsigned and signed payload based on TLS state for request. -func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) - return err -} - -// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and set payload sha256 middleware. -type dynamicPayloadSigningMiddleware struct { -} - -// ID returns the resolver identifier -func (m *dynamicPayloadSigningMiddleware) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize delegates SHA256 computation according to whether the request -// is TLS-enabled. -func (m *dynamicPayloadSigningMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if req.IsHTTPS() { - return (&UnsignedPayload{}).HandleFinalize(ctx, in, next) - } - return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next) -} - -// UnsignedPayload sets the SigV4 request payload hash to unsigned. -// -// Will not set the Unsigned Payload magic SHA value, if a SHA has already been -// stored in the context. (e.g. application pre-computed SHA256 before making -// API call). -// -// This middleware does not check the X-Amz-Content-Sha256 header, if that -// header is serialized a middleware must translate it into the context. -type UnsignedPayload struct{} - -// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation -// middleware stack -func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -// ID returns the unsignedPayload identifier -func (m *UnsignedPayload) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize sets the payload hash magic value to the unsigned sentinel. -func (m *UnsignedPayload) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetPayloadHash(ctx) == "" { - ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload) - } - return next.HandleFinalize(ctx, in) -} - -// ComputePayloadSHA256 computes SHA256 payload hash to sign. -// -// Will not set the Unsigned Payload magic SHA value, if a SHA has already been -// stored in the context. (e.g. application pre-computed SHA256 before making -// API call). -// -// This middleware does not check the X-Amz-Content-Sha256 header, if that -// header is serialized a middleware must translate it into the context. -type ComputePayloadSHA256 struct{} - -// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the -// operation middleware stack -func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the -// operation middleware stack -func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove(computePayloadHashMiddlewareID) - return err -} - -// ID is the middleware name -func (m *ComputePayloadSHA256) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize computes the payload hash for the request, storing it to the -// context. This is a no-op if a caller has previously set that value. -func (m *ComputePayloadSHA256) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetPayloadHash(ctx) != "" { - return next.HandleFinalize(ctx, in) - } - - _, span := tracing.StartSpan(ctx, "ComputePayloadSHA256") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - hash := sha256.New() - if stream := req.GetStream(); stream != nil { - _, err = io.Copy(hash, stream) - if err != nil { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("failed to compute payload hash, %w", err), - } - } - - if err := req.RewindStream(); err != nil { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("failed to seek body to start, %w", err), - } - } - } - - ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) - - span.End() - return next.HandleFinalize(ctx, in) -} - -// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the -// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. -// -// Use this to disable computing the Payload SHA256 checksum and instead use -// UNSIGNED-PAYLOAD for the SHA256 value. -func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{}) - return err -} - -// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to -// the Payload hash stored in the context. -type ContentSHA256Header struct{} - -// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the -// operation middleware stack -func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) -} - -// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware -// from the operation middleware stack -func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID()) - return err -} - -// ID returns the ContentSHA256HeaderMiddleware identifier -func (m *ContentSHA256Header) ID() string { - return "SigV4ContentSHA256Header" -} - -// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash -// stored in the context. -func (m *ContentSHA256Header) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} - } - - req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) - return next.HandleFinalize(ctx, in) -} - -// SignHTTPRequestMiddlewareOptions is the configuration options for -// [SignHTTPRequestMiddleware]. -// -// Deprecated: [SignHTTPRequestMiddleware] is deprecated. -type SignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Signer HTTPSigner - LogSigning bool -} - -// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 -// HTTP Signing. -// -// Deprecated: AWS service clients no longer use this middleware. Signing as an -// SDK operation is now performed through an internal per-service middleware -// which opaquely selects and uses the signer from the resolved auth scheme. -type SignHTTPRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - signer HTTPSigner - logSigning bool -} - -// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using -// the given [Signer] for signing requests. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { - return &SignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - signer: options.Signer, - logSigning: options.LogSigning, - } -} - -// ID is the SignHTTPRequestMiddleware identifier. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func (s *SignHTTPRequestMiddleware) ID() string { - return "Signing" -} - -// HandleFinalize will take the provided input and sign the request using the -// SigV4 authentication scheme. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if !haveCredentialProvider(s.credentialsProvider) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} - } - - signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) - payloadHash := GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} - } - - credentials, err := s.credentialsProvider.Retrieve(ctx) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} - } - - signerOptions := []func(o *SignerOptions){ - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }, - } - - // existing DisableURIPathEscaping is equivalent in purpose - // to authentication scheme property DisableDoubleEncoding - disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) - if overridden { - signerOptions = append(signerOptions, func(o *SignerOptions) { - o.DisableURIPathEscaping = disableDoubleEncoding - }) - } - - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} - } - - ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) - - return next.HandleFinalize(ctx, in) -} - -// StreamingEventsPayload signs input event stream messages. -type StreamingEventsPayload struct{} - -// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. -func AddStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before) -} - -// ID identifies the middleware. -func (s *StreamingEventsPayload) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize marks the input stream to be signed with SigV4. -func (s *StreamingEventsPayload) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - contentSHA := GetPayloadHash(ctx) - if len(contentSHA) == 0 { - contentSHA = v4Internal.StreamingEventsPayload - } - - ctx = SetPayloadHash(ctx, contentSHA) - - return next.HandleFinalize(ctx, in) -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. -func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - const authHeaderSignatureElem = "Signature=" - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ",") - for _, p := range ps { - p = strings.TrimSpace(p) - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func haveCredentialProvider(p aws.CredentialsProvider) bool { - if p == nil { - return false - } - - return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil)) -} - -type payloadHashKey struct{} - -// GetPayloadHash retrieves the payload hash to use for signing -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetPayloadHash(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string) - return v -} - -// SetPayloadHash sets the payload hash to be used for signing the request -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetPayloadHash(ctx context.Context, hash string) context.Context { - return middleware.WithStackValue(ctx, payloadHashKey{}, hash) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go deleted file mode 100644 index e1a066512437..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go +++ /dev/null @@ -1,127 +0,0 @@ -package v4 - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - smithyHTTP "github.com/aws/smithy-go/transport/http" -) - -// HTTPPresigner is an interface to a SigV4 signer that can sign create a -// presigned URL for a HTTP requests. -type HTTPPresigner interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignedHTTPRequest provides the URL and signed headers that are included -// in the presigned URL. -type PresignedHTTPRequest struct { - URL string - Method string - SignedHeader http.Header -} - -// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. -type PresignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Presigner HTTPPresigner - LogSigning bool -} - -// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a -// presigned URL for an HTTP request. -// -// Will short circuit the middleware stack and not forward onto the next -// Finalize handler. -type PresignHTTPRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - presigner HTTPPresigner - logSigning bool -} - -// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware -// initialized with the presigner. -func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { - return &PresignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - presigner: options.Presigner, - logSigning: options.LogSigning, - } -} - -// ID provides the middleware ID. -func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 presign authentication scheme. -// -// Since the signed request is not a valid HTTP request -func (s *PresignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyHTTP.Request) - if !ok { - return out, metadata, &SigningError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - httpReq := req.Build(ctx) - if !haveCredentialProvider(s.credentialsProvider) { - out.Result = &PresignedHTTPRequest{ - URL: httpReq.URL.String(), - Method: httpReq.Method, - SignedHeader: http.Header{}, - } - - return out, metadata, nil - } - - signingName := awsmiddleware.GetSigningName(ctx) - signingRegion := awsmiddleware.GetSigningRegion(ctx) - payloadHash := GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{ - Err: fmt.Errorf("computed payload hash missing from context"), - } - } - - credentials, err := s.credentialsProvider.Retrieve(ctx) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to retrieve credentials: %w", err), - } - } - - u, h, err := s.presigner.PresignHTTP(ctx, credentials, - httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(), - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to sign http request, %w", err), - } - } - - out.Result = &PresignedHTTPRequest{ - URL: u, - Method: httpReq.Method, - SignedHeader: h, - } - - return out, metadata, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go deleted file mode 100644 index 32875e077989..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go +++ /dev/null @@ -1,86 +0,0 @@ -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "github.com/aws/aws-sdk-go-v2/aws" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - "strings" - "time" -) - -// EventStreamSigner is an AWS EventStream protocol signer. -type EventStreamSigner interface { - GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) -} - -// StreamSignerOptions is the configuration options for StreamSigner. -type StreamSignerOptions struct{} - -// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads. -type StreamSigner struct { - options StreamSignerOptions - - credentials aws.Credentials - service string - region string - - prevSignature []byte - - signingKeyDeriver *v4Internal.SigningKeyDeriver -} - -// NewStreamSigner returns a new AWS EventStream protocol signer. -func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner { - o := StreamSignerOptions{} - - for _, fn := range optFns { - fn(&o) - } - - return &StreamSigner{ - options: o, - credentials: credentials, - service: service, - region: region, - signingKeyDeriver: v4Internal.NewSigningKeyDeriver(), - prevSignature: seedSignature, - } -} - -// GetSignature signs the provided header and payload bytes. -func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - prevSignature := s.prevSignature - - st := v4Internal.NewSigningTime(signingTime.UTC()) - - sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) - - scope := v4Internal.BuildCredentialScope(st, s.region, s.service) - - stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st) - - signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign)) - s.prevSignature = signature - - return signature, nil -} - -func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string { - hash := sha256.New() - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - signingTime.TimeFormat(), - credentialScope, - hex.EncodeToString(previousSignature), - hex.EncodeToString(makeHash(hash, headers)), - hex.EncodeToString(makeHash(hash, payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go deleted file mode 100644 index 7ed91d5bac17..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ /dev/null @@ -1,564 +0,0 @@ -// Package v4 implements the AWS signature version 4 algorithm (commonly known -// as SigV4). -// -// For more information about SigV4, see [Signing AWS API requests] in the IAM -// user guide. -// -// While this implementation CAN work in an external context, it is developed -// primarily for SDK use and you may encounter fringe behaviors around header -// canonicalization. -// -// # Pre-escaping a request URI -// -// AWS v4 signature validation requires that the canonical string's URI path -// component must be the escaped form of the HTTP request's path. -// -// The Go HTTP client will perform escaping automatically on the HTTP request. -// This may cause signature validation errors because the request differs from -// the URI path or query from which the signature was generated. -// -// Because of this, we recommend that you explicitly escape the request when -// using this signer outside of the SDK to prevent possible signature mismatch. -// This can be done by setting URL.Opaque on the request. The signer will -// prefer that value, falling back to the return of URL.EscapedPath if unset. -// -// When setting URL.Opaque you must do so in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the escaping will not work -// correctly. -// -// The TestStandaloneSign unit test provides a complete example of using the -// signer outside of the SDK and pre-escaping the URI path. -// -// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "hash" - "net/http" - "net/textproto" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/logging" -) - -const ( - signingAlgorithm = "AWS4-HMAC-SHA256" - authorizationHeader = "Authorization" - - // Version of signing v4 - Version = "SigV4" -) - -// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests -type HTTPSigner interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error -} - -type keyDerivator interface { - DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte -} - -// SignerOptions is the SigV4 Signer options. -type SignerOptions struct { - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // The logger to send log messages to. - Logger logging.Logger - - // Enable logging of signed requests. - // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent - // presigned URL. - LogSigning bool - - // Disables setting the session token on the request as part of signing - // through X-Amz-Security-Token. This is needed for variations of v4 that - // present the token elsewhere. - DisableSessionToken bool -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - options SignerOptions - keyDerivator keyDerivator -} - -// NewSigner returns a new SigV4 Signer -func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { - options := SignerOptions{} - - for _, fn := range optFns { - fn(&options) - } - - return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} -} - -type httpSigner struct { - Request *http.Request - ServiceName string - Region string - Time v4Internal.SigningTime - Credentials aws.Credentials - KeyDerivator keyDerivator - IsPreSign bool - - PayloadHash string - - DisableHeaderHoisting bool - DisableURIPathEscaping bool - DisableSessionToken bool -} - -func (s *httpSigner) Build() (signedRequest, error) { - req := s.Request - - query := req.URL.Query() - headers := req.Header - - s.setRequiredSigningFields(headers, query) - - // Sort Each Query Key's Values - for key := range query { - sort.Strings(query[key]) - } - - v4Internal.SanitizeHostForHeader(req) - - credentialScope := s.buildCredentialScope() - credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope - if s.IsPreSign { - query.Set(v4Internal.AmzCredentialKey, credentialStr) - } - - unsignedHeaders := headers - if s.IsPreSign && !s.DisableHeaderHoisting { - var urlValues url.Values - urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) - for k := range urlValues { - query[k] = urlValues[k] - } - } - - host := req.URL.Host - if len(req.Host) > 0 { - host = req.Host - } - - signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) - - if s.IsPreSign { - query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) - } - - var rawQuery strings.Builder - rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) - - canonicalURI := v4Internal.GetURIPath(req.URL) - if !s.DisableURIPathEscaping { - canonicalURI = httpbinding.EscapePath(canonicalURI, false) - } - - canonicalString := s.buildCanonicalString( - req.Method, - canonicalURI, - rawQuery.String(), - signedHeadersStr, - canonicalHeaderStr, - ) - - strToSign := s.buildStringToSign(credentialScope, canonicalString) - signingSignature, err := s.buildSignature(strToSign) - if err != nil { - return signedRequest{}, err - } - - if s.IsPreSign { - rawQuery.WriteString("&X-Amz-Signature=") - rawQuery.WriteString(signingSignature) - } else { - headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) - } - - req.URL.RawQuery = rawQuery.String() - - return signedRequest{ - Request: req, - SignedHeaders: signedHeaders, - CanonicalString: canonicalString, - StringToSign: strToSign, - PreSigned: s.IsPreSign, - }, nil -} - -func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { - const credential = "Credential=" - const signedHeaders = "SignedHeaders=" - const signature = "Signature=" - const commaSpace = ", " - - var parts strings.Builder - parts.Grow(len(signingAlgorithm) + 1 + - len(credential) + len(credentialStr) + 2 + - len(signedHeaders) + len(signedHeadersStr) + 2 + - len(signature) + len(signingSignature), - ) - parts.WriteString(signingAlgorithm) - parts.WriteRune(' ') - parts.WriteString(credential) - parts.WriteString(credentialStr) - parts.WriteString(commaSpace) - parts.WriteString(signedHeaders) - parts.WriteString(signedHeadersStr) - parts.WriteString(commaSpace) - parts.WriteString(signature) - parts.WriteString(signingSignature) - return parts.String() -} - -// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// The payloadHash is the hex encoded SHA-256 hash of the request payload, and -// must be provided. Even if the request has no payload (aka body). If the -// request has no payload you should use the hex encoded SHA-256 of an empty -// string as the payloadHash value. -// -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -// -// Some services such as Amazon S3 accept alternative values for the payload -// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be -// included in the request signature. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The passed in request will be modified in place. -func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r, - PayloadHash: payloadHash, - ServiceName: service, - Region: region, - Credentials: credentials, - Time: v4Internal.NewSigningTime(signingTime.UTC()), - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - DisableSessionToken: options.DisableSessionToken, - KeyDerivator: s.keyDerivator, - } - - signedRequest, err := signer.Build() - if err != nil { - return err - } - - logSigningInfo(ctx, options, &signedRequest, false) - - return nil -} - -// PresignHTTP signs AWS v4 requests with the payload hash, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns the signed URL and the map of HTTP headers that were included in the -// signature or an error if signing the request failed. For presigned requests -// these headers and their values must be included on the HTTP request when it -// is made. This is helpful to know what header values need to be shared with -// the party the presigned request will be distributed to. -// -// The payloadHash is the hex encoded SHA-256 hash of the request payload, and -// must be provided. Even if the request has no payload (aka body). If the -// request has no payload you should use the hex encoded SHA-256 of an empty -// string as the payloadHash value. -// -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -// -// Some services such as Amazon S3 accept alternative values for the payload -// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be -// included in the request signature. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// -// PresignHTTP differs from SignHTTP in that it will sign the request using -// query string instead of header values. This allows you to share the -// Presigned Request's URL with third parties, or distribute it throughout your -// system with minimal dependencies. -// -// PresignHTTP will not set the expires time of the presigned request -// automatically. To specify the expire duration for a request add the -// "X-Amz-Expires" query parameter on the request with the value as the -// duration in seconds the presigned URL should be considered valid for. This -// parameter is not used by all AWS services, and is most notable used by -// Amazon S3 APIs. -// -// expires := 20 * time.Minute -// query := req.URL.Query() -// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) -// req.URL.RawQuery = query.Encode() -// -// This method does not modify the provided request. -func (s *Signer) PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*SignerOptions), -) (signedURI string, signedHeaders http.Header, err error) { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r.Clone(r.Context()), - PayloadHash: payloadHash, - ServiceName: service, - Region: region, - Credentials: credentials, - Time: v4Internal.NewSigningTime(signingTime.UTC()), - IsPreSign: true, - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - DisableSessionToken: options.DisableSessionToken, - KeyDerivator: s.keyDerivator, - } - - signedRequest, err := signer.Build() - if err != nil { - return "", nil, err - } - - logSigningInfo(ctx, options, &signedRequest, true) - - signedHeaders = make(http.Header) - - // For the signed headers we canonicalize the header keys in the returned map. - // This avoids situations where can standard library double headers like host header. For example the standard - // library will set the Host header, even if it is present in lower-case form. - for k, v := range signedRequest.SignedHeaders { - key := textproto.CanonicalMIMEHeaderKey(k) - signedHeaders[key] = append(signedHeaders[key], v...) - } - - return signedRequest.Request.URL.String(), signedHeaders, nil -} - -func (s *httpSigner) buildCredentialScope() string { - return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName) -} - -func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - - // A list of headers to be converted to lower case to mitigate a limitation from S3 - lowerCaseHeaders := map[string]string{ - "X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508 - "X-Amz-Request-Payer": "x-amz-request-payer", // see #2764 - } - - for k, h := range header { - if newKey, ok := lowerCaseHeaders[k]; ok { - k = newKey - } - - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} - -func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { - signed = make(http.Header) - - var headers []string - const hostHeader = "host" - headers = append(headers, hostHeader) - signed[hostHeader] = append(signed[hostHeader], host) - - const contentLengthHeader = "content-length" - if length > 0 { - headers = append(headers, contentLengthHeader) - signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) - } - - for k, v := range header { - if !rule.IsValid(k) { - continue // ignored header - } - if strings.EqualFold(k, contentLengthHeader) { - // prevent signing already handled content-length header. - continue - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := signed[lowerCaseKey]; ok { - // include additional values - signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - signed[lowerCaseKey] = v - } - sort.Strings(headers) - - signedHeaders = strings.Join(headers, ";") - - var canonicalHeaders strings.Builder - n := len(headers) - const colon = ':' - for i := 0; i < n; i++ { - if headers[i] == hostHeader { - canonicalHeaders.WriteString(hostHeader) - canonicalHeaders.WriteRune(colon) - canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) - } else { - canonicalHeaders.WriteString(headers[i]) - canonicalHeaders.WriteRune(colon) - // Trim out leading, trailing, and dedup inner spaces from signed header values. - values := signed[headers[i]] - for j, v := range values { - cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) - canonicalHeaders.WriteString(cleanedValue) - if j < len(values)-1 { - canonicalHeaders.WriteRune(',') - } - } - } - canonicalHeaders.WriteRune('\n') - } - canonicalHeadersStr = canonicalHeaders.String() - - return signed, signedHeaders, canonicalHeadersStr -} - -func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { - return strings.Join([]string{ - method, - uri, - query, - canonicalHeaders, - signedHeaders, - s.PayloadHash, - }, "\n") -} - -func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { - return strings.Join([]string{ - signingAlgorithm, - s.Time.TimeFormat(), - credentialScope, - hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), - }, "\n") -} - -func makeHash(hash hash.Hash, b []byte) []byte { - hash.Reset() - hash.Write(b) - return hash.Sum(nil) -} - -func (s *httpSigner) buildSignature(strToSign string) (string, error) { - key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) - return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil -} - -func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { - amzDate := s.Time.TimeFormat() - - if s.IsPreSign { - query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) - sessionToken := s.Credentials.SessionToken - if !s.DisableSessionToken && len(sessionToken) > 0 { - query.Set("X-Amz-Security-Token", sessionToken) - } - - query.Set(v4Internal.AmzDateKey, amzDate) - return - } - - headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) - - if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 { - headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) - } -} - -func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { - if !options.LogSigning { - return - } - signedURLMsg := "" - if isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) - } - logger := logging.WithContext(ctx, options.Logger) - logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) -} - -type signedRequest struct { - Request *http.Request - SignedHeaders http.Header - CanonicalString string - StringToSign string - PreSigned bool -} - -const logSignInfoMsg = `Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go deleted file mode 100644 index f3fc4d610dcd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by aws/generate.go DO NOT EDIT. - -package aws - -import ( - "github.com/aws/smithy-go/ptr" - "time" -) - -// Bool returns a pointer value for the bool value passed in. -func Bool(v bool) *bool { - return ptr.Bool(v) -} - -// BoolSlice returns a slice of bool pointers from the values -// passed in. -func BoolSlice(vs []bool) []*bool { - return ptr.BoolSlice(vs) -} - -// BoolMap returns a map of bool pointers from the values -// passed in. -func BoolMap(vs map[string]bool) map[string]*bool { - return ptr.BoolMap(vs) -} - -// Byte returns a pointer value for the byte value passed in. -func Byte(v byte) *byte { - return ptr.Byte(v) -} - -// ByteSlice returns a slice of byte pointers from the values -// passed in. -func ByteSlice(vs []byte) []*byte { - return ptr.ByteSlice(vs) -} - -// ByteMap returns a map of byte pointers from the values -// passed in. -func ByteMap(vs map[string]byte) map[string]*byte { - return ptr.ByteMap(vs) -} - -// String returns a pointer value for the string value passed in. -func String(v string) *string { - return ptr.String(v) -} - -// StringSlice returns a slice of string pointers from the values -// passed in. -func StringSlice(vs []string) []*string { - return ptr.StringSlice(vs) -} - -// StringMap returns a map of string pointers from the values -// passed in. -func StringMap(vs map[string]string) map[string]*string { - return ptr.StringMap(vs) -} - -// Int returns a pointer value for the int value passed in. -func Int(v int) *int { - return ptr.Int(v) -} - -// IntSlice returns a slice of int pointers from the values -// passed in. -func IntSlice(vs []int) []*int { - return ptr.IntSlice(vs) -} - -// IntMap returns a map of int pointers from the values -// passed in. -func IntMap(vs map[string]int) map[string]*int { - return ptr.IntMap(vs) -} - -// Int8 returns a pointer value for the int8 value passed in. -func Int8(v int8) *int8 { - return ptr.Int8(v) -} - -// Int8Slice returns a slice of int8 pointers from the values -// passed in. -func Int8Slice(vs []int8) []*int8 { - return ptr.Int8Slice(vs) -} - -// Int8Map returns a map of int8 pointers from the values -// passed in. -func Int8Map(vs map[string]int8) map[string]*int8 { - return ptr.Int8Map(vs) -} - -// Int16 returns a pointer value for the int16 value passed in. -func Int16(v int16) *int16 { - return ptr.Int16(v) -} - -// Int16Slice returns a slice of int16 pointers from the values -// passed in. -func Int16Slice(vs []int16) []*int16 { - return ptr.Int16Slice(vs) -} - -// Int16Map returns a map of int16 pointers from the values -// passed in. -func Int16Map(vs map[string]int16) map[string]*int16 { - return ptr.Int16Map(vs) -} - -// Int32 returns a pointer value for the int32 value passed in. -func Int32(v int32) *int32 { - return ptr.Int32(v) -} - -// Int32Slice returns a slice of int32 pointers from the values -// passed in. -func Int32Slice(vs []int32) []*int32 { - return ptr.Int32Slice(vs) -} - -// Int32Map returns a map of int32 pointers from the values -// passed in. -func Int32Map(vs map[string]int32) map[string]*int32 { - return ptr.Int32Map(vs) -} - -// Int64 returns a pointer value for the int64 value passed in. -func Int64(v int64) *int64 { - return ptr.Int64(v) -} - -// Int64Slice returns a slice of int64 pointers from the values -// passed in. -func Int64Slice(vs []int64) []*int64 { - return ptr.Int64Slice(vs) -} - -// Int64Map returns a map of int64 pointers from the values -// passed in. -func Int64Map(vs map[string]int64) map[string]*int64 { - return ptr.Int64Map(vs) -} - -// Uint returns a pointer value for the uint value passed in. -func Uint(v uint) *uint { - return ptr.Uint(v) -} - -// UintSlice returns a slice of uint pointers from the values -// passed in. -func UintSlice(vs []uint) []*uint { - return ptr.UintSlice(vs) -} - -// UintMap returns a map of uint pointers from the values -// passed in. -func UintMap(vs map[string]uint) map[string]*uint { - return ptr.UintMap(vs) -} - -// Uint8 returns a pointer value for the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return ptr.Uint8(v) -} - -// Uint8Slice returns a slice of uint8 pointers from the values -// passed in. -func Uint8Slice(vs []uint8) []*uint8 { - return ptr.Uint8Slice(vs) -} - -// Uint8Map returns a map of uint8 pointers from the values -// passed in. -func Uint8Map(vs map[string]uint8) map[string]*uint8 { - return ptr.Uint8Map(vs) -} - -// Uint16 returns a pointer value for the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return ptr.Uint16(v) -} - -// Uint16Slice returns a slice of uint16 pointers from the values -// passed in. -func Uint16Slice(vs []uint16) []*uint16 { - return ptr.Uint16Slice(vs) -} - -// Uint16Map returns a map of uint16 pointers from the values -// passed in. -func Uint16Map(vs map[string]uint16) map[string]*uint16 { - return ptr.Uint16Map(vs) -} - -// Uint32 returns a pointer value for the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return ptr.Uint32(v) -} - -// Uint32Slice returns a slice of uint32 pointers from the values -// passed in. -func Uint32Slice(vs []uint32) []*uint32 { - return ptr.Uint32Slice(vs) -} - -// Uint32Map returns a map of uint32 pointers from the values -// passed in. -func Uint32Map(vs map[string]uint32) map[string]*uint32 { - return ptr.Uint32Map(vs) -} - -// Uint64 returns a pointer value for the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return ptr.Uint64(v) -} - -// Uint64Slice returns a slice of uint64 pointers from the values -// passed in. -func Uint64Slice(vs []uint64) []*uint64 { - return ptr.Uint64Slice(vs) -} - -// Uint64Map returns a map of uint64 pointers from the values -// passed in. -func Uint64Map(vs map[string]uint64) map[string]*uint64 { - return ptr.Uint64Map(vs) -} - -// Float32 returns a pointer value for the float32 value passed in. -func Float32(v float32) *float32 { - return ptr.Float32(v) -} - -// Float32Slice returns a slice of float32 pointers from the values -// passed in. -func Float32Slice(vs []float32) []*float32 { - return ptr.Float32Slice(vs) -} - -// Float32Map returns a map of float32 pointers from the values -// passed in. -func Float32Map(vs map[string]float32) map[string]*float32 { - return ptr.Float32Map(vs) -} - -// Float64 returns a pointer value for the float64 value passed in. -func Float64(v float64) *float64 { - return ptr.Float64(v) -} - -// Float64Slice returns a slice of float64 pointers from the values -// passed in. -func Float64Slice(vs []float64) []*float64 { - return ptr.Float64Slice(vs) -} - -// Float64Map returns a map of float64 pointers from the values -// passed in. -func Float64Map(vs map[string]float64) map[string]*float64 { - return ptr.Float64Map(vs) -} - -// Time returns a pointer value for the time.Time value passed in. -func Time(v time.Time) *time.Time { - return ptr.Time(v) -} - -// TimeSlice returns a slice of time.Time pointers from the values -// passed in. -func TimeSlice(vs []time.Time) []*time.Time { - return ptr.TimeSlice(vs) -} - -// TimeMap returns a map of time.Time pointers from the values -// passed in. -func TimeMap(vs map[string]time.Time) map[string]*time.Time { - return ptr.TimeMap(vs) -} - -// Duration returns a pointer value for the time.Duration value passed in. -func Duration(v time.Duration) *time.Duration { - return ptr.Duration(v) -} - -// DurationSlice returns a slice of time.Duration pointers from the values -// passed in. -func DurationSlice(vs []time.Duration) []*time.Duration { - return ptr.DurationSlice(vs) -} - -// DurationMap returns a map of time.Duration pointers from the values -// passed in. -func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { - return ptr.DurationMap(vs) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go deleted file mode 100644 index 8d7c35a9ec81..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go +++ /dev/null @@ -1,342 +0,0 @@ -package http - -import ( - "context" - "crypto/tls" - "net" - "net/http" - "reflect" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go/tracing" -) - -// Defaults for the HTTPTransportBuilder. -var ( - // Default connection pool options - DefaultHTTPTransportMaxIdleConns = 100 - DefaultHTTPTransportMaxIdleConnsPerHost = 10 - - // Default connection timeouts - DefaultHTTPTransportIdleConnTimeout = 90 * time.Second - DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second - DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second - - // Default to TLS 1.2 for all HTTPS requests. - DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12 -) - -// Timeouts for net.Dialer's network connection. -var ( - DefaultDialConnectTimeout = 30 * time.Second - DefaultDialKeepAliveTimeout = 30 * time.Second -) - -// BuildableClient provides a HTTPClient implementation with options to -// create copies of the HTTPClient when additional configuration is provided. -// -// The client's methods will not share the http.Transport value between copies -// of the BuildableClient. Only exported member values of the Transport and -// optional Dialer will be copied between copies of BuildableClient. -type BuildableClient struct { - transport *http.Transport - dialer *net.Dialer - - initOnce sync.Once - - clientTimeout time.Duration - client *http.Client -} - -// NewBuildableClient returns an initialized client for invoking HTTP -// requests. -func NewBuildableClient() *BuildableClient { - return &BuildableClient{} -} - -// Do implements the HTTPClient interface's Do method to invoke a HTTP request, -// and receive the response. Uses the BuildableClient's current -// configuration to invoke the http.Request. -// -// If connection pooling is enabled (aka HTTP KeepAlive) the client will only -// share pooled connections with its own instance. Copies of the -// BuildableClient will have their own connection pools. -// -// Redirect (3xx) responses will not be followed, the HTTP response received -// will returned instead. -func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) { - b.initOnce.Do(b.build) - - return b.client.Do(req) -} - -// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient. -// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client. -func (b *BuildableClient) Freeze() aws.HTTPClient { - cpy := b.clone() - cpy.build() - return cpy.client -} - -func (b *BuildableClient) build() { - b.client = wrapWithLimitedRedirect(&http.Client{ - Timeout: b.clientTimeout, - Transport: b.GetTransport(), - }) -} - -func (b *BuildableClient) clone() *BuildableClient { - cpy := NewBuildableClient() - cpy.transport = b.GetTransport() - cpy.dialer = b.GetDialer() - cpy.clientTimeout = b.clientTimeout - - return cpy -} - -// WithTransportOptions copies the BuildableClient and returns it with the -// http.Transport options applied. -// -// If a non (*http.Transport) was set as the round tripper, the round tripper -// will be replaced with a default Transport value before invoking the option -// functions. -func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient { - cpy := b.clone() - - tr := cpy.GetTransport() - for _, opt := range opts { - opt(tr) - } - cpy.transport = tr - - return cpy -} - -// WithDialerOptions copies the BuildableClient and returns it with the -// net.Dialer options applied. Will set the client's http.Transport DialContext -// member. -func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient { - cpy := b.clone() - - dialer := cpy.GetDialer() - for _, opt := range opts { - opt(dialer) - } - cpy.dialer = dialer - - tr := cpy.GetTransport() - tr.DialContext = cpy.dialer.DialContext - cpy.transport = tr - - return cpy -} - -// WithTimeout Sets the timeout used by the client for all requests. -func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient { - cpy := b.clone() - cpy.clientTimeout = timeout - return cpy -} - -// GetTransport returns a copy of the client's HTTP Transport. -func (b *BuildableClient) GetTransport() *http.Transport { - var tr *http.Transport - if b.transport != nil { - tr = b.transport.Clone() - } else { - tr = defaultHTTPTransport() - } - - return tr -} - -// GetDialer returns a copy of the client's network dialer. -func (b *BuildableClient) GetDialer() *net.Dialer { - var dialer *net.Dialer - if b.dialer != nil { - dialer = shallowCopyStruct(b.dialer).(*net.Dialer) - } else { - dialer = defaultDialer() - } - - return dialer -} - -// GetTimeout returns a copy of the client's timeout to cancel requests with. -func (b *BuildableClient) GetTimeout() time.Duration { - return b.clientTimeout -} - -func defaultDialer() *net.Dialer { - return &net.Dialer{ - Timeout: DefaultDialConnectTimeout, - KeepAlive: DefaultDialKeepAliveTimeout, - DualStack: true, - } -} - -func defaultHTTPTransport() *http.Transport { - dialer := defaultDialer() - - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: traceDialContext(dialer.DialContext), - TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout, - MaxIdleConns: DefaultHTTPTransportMaxIdleConns, - MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost, - IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout, - ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout, - ForceAttemptHTTP2: true, - TLSClientConfig: &tls.Config{ - MinVersion: DefaultHTTPTransportTLSMinVersion, - }, - } - - return tr -} - -type dialContext func(ctx context.Context, network, addr string) (net.Conn, error) - -func traceDialContext(dc dialContext) dialContext { - return func(ctx context.Context, network, addr string) (net.Conn, error) { - span, _ := tracing.GetSpan(ctx) - span.SetProperty("net.peer.name", addr) - - conn, err := dc(ctx, network, addr) - if err != nil { - return conn, err - } - - raddr := conn.RemoteAddr() - if raddr == nil { - return conn, err - } - - host, port, err := net.SplitHostPort(raddr.String()) - if err != nil { // don't blow up just because we couldn't parse - span.SetProperty("net.peer.addr", raddr.String()) - } else { - span.SetProperty("net.peer.host", host) - span.SetProperty("net.peer.port", port) - } - - return conn, err - } -} - -// shallowCopyStruct creates a shallow copy of the passed in source struct, and -// returns that copy of the same struct type. -func shallowCopyStruct(src interface{}) interface{} { - srcVal := reflect.ValueOf(src) - srcValType := srcVal.Type() - - var returnAsPtr bool - if srcValType.Kind() == reflect.Ptr { - srcVal = srcVal.Elem() - srcValType = srcValType.Elem() - returnAsPtr = true - } - dstVal := reflect.New(srcValType).Elem() - - for i := 0; i < srcValType.NumField(); i++ { - ft := srcValType.Field(i) - if len(ft.PkgPath) != 0 { - // unexported fields have a PkgPath - continue - } - - dstVal.Field(i).Set(srcVal.Field(i)) - } - - if returnAsPtr { - dstVal = dstVal.Addr() - } - - return dstVal.Interface() -} - -// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to -// not follow any redirect other than 307 and 308. No other redirect will be -// followed. -// -// If the client does not have a Transport defined will use a new SDK default -// http.Transport configuration. -func wrapWithLimitedRedirect(c *http.Client) *http.Client { - tr := c.Transport - if tr == nil { - tr = defaultHTTPTransport() - } - - cc := *c - cc.CheckRedirect = limitedRedirect - cc.Transport = suppressBadHTTPRedirectTransport{ - tr: tr, - } - - return &cc -} - -// limitedRedirect is a CheckRedirect that prevents the client from following -// any non 307/308 HTTP status code redirects. -// -// The 307 and 308 redirects are allowed because the client must use the -// original HTTP method for the redirected to location. Whereas 301 and 302 -// allow the client to switch to GET for the redirect. -// -// Suppresses all redirect requests with a URL of badHTTPRedirectLocation. -func limitedRedirect(r *http.Request, via []*http.Request) error { - // Request.Response, in CheckRedirect is the response that is triggering - // the redirect. - resp := r.Response - if r.URL.String() == badHTTPRedirectLocation { - resp.Header.Del(badHTTPRedirectLocation) - return http.ErrUseLastResponse - } - - switch resp.StatusCode { - case 307, 308: - // Only allow 307 and 308 redirects as they preserve the method. - return nil - } - - return http.ErrUseLastResponse -} - -// suppressBadHTTPRedirectTransport provides an http.RoundTripper -// implementation that wraps another http.RoundTripper to prevent HTTP client -// receiving 301 and 302 HTTP responses redirects without the required location -// header. -// -// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect, -// that check for responses with having a URL of baseHTTPRedirectLocation, and -// suppress the redirect. -type suppressBadHTTPRedirectTransport struct { - tr http.RoundTripper -} - -const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation` - -// RoundTrip backfills a stub location when a 301/302 response is received -// without a location. This stub location is used by limitedRedirect to prevent -// the HTTP client from failing attempting to use follow a redirect without a -// location value. -func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) { - resp, err := t.tr.RoundTrip(r) - if err != nil { - return resp, err - } - - // S3 is the only known service to return 301 without location header. - // The Go standard library HTTP client will return an opaque error if it - // tries to follow a 301/302 response missing the location header. - switch resp.StatusCode { - case 301, 302: - if v := resp.Header.Get("Location"); len(v) == 0 { - resp.Header.Set("Location", badHTTPRedirectLocation) - } - } - - return resp, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go deleted file mode 100644 index 556f54a7f777..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go +++ /dev/null @@ -1,42 +0,0 @@ -package http - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// removeContentTypeHeader is a build middleware that removes -// content type header if content-length header is unset or -// is set to zero, -type removeContentTypeHeader struct { -} - -// ID the name of the middleware. -func (m *removeContentTypeHeader) ID() string { - return "RemoveContentTypeHeader" -} - -// HandleBuild adds or appends the constructed user agent to the request. -func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - // remove contentTypeHeader when content-length is zero - if req.ContentLength == 0 { - req.Header.Del("content-type") - } - - return next.HandleBuild(ctx, in) -} - -// RemoveContentTypeHeader removes content-type header if -// content length is unset or equal to zero. -func RemoveContentTypeHeader(stack *middleware.Stack) error { - return stack.Build.Add(&removeContentTypeHeader{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go deleted file mode 100644 index 44651c9902df..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go +++ /dev/null @@ -1,33 +0,0 @@ -package http - -import ( - "errors" - "fmt" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ResponseError provides the HTTP centric error type wrapping the underlying error -// with the HTTP response value and the deserialized RequestID. -type ResponseError struct { - *smithyhttp.ResponseError - - // RequestID associated with response error - RequestID string -} - -// ServiceRequestID returns the request id associated with Response Error -func (e *ResponseError) ServiceRequestID() string { return e.RequestID } - -// Error returns the formatted error -func (e *ResponseError) Error() string { - return fmt.Sprintf( - "https response error StatusCode: %d, RequestID: %s, %v", - e.Response.StatusCode, e.RequestID, e.Err) -} - -// As populates target and returns true if the type of target is a error type that -// the ResponseError embeds, (e.g.AWS HTTP ResponseError) -func (e *ResponseError) As(target interface{}) bool { - return errors.As(e.ResponseError, target) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go deleted file mode 100644 index a1ad20fe3418..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go +++ /dev/null @@ -1,56 +0,0 @@ -package http - -import ( - "context" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddResponseErrorMiddleware adds response error wrapper middleware -func AddResponseErrorMiddleware(stack *middleware.Stack) error { - // add error wrapper middleware before request id retriever middleware so that it can wrap the error response - // returned by operation deserializers - return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) -} - -// ResponseErrorWrapper wraps operation errors with ResponseError. -type ResponseErrorWrapper struct { -} - -// ID returns the middleware identifier -func (m *ResponseErrorWrapper) ID() string { - return "ResponseErrorWrapper" -} - -// HandleDeserialize wraps the stack error with smithyhttp.ResponseError. -func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err == nil { - // Nothing to do when there is no error. - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. - return out, metadata, err - } - - // look for request id in metadata - reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) - - // Wrap the returned smithy error with the request id retrieved from the metadata - err = &ResponseError{ - ResponseError: &smithyhttp.ResponseError{ - Response: resp, - Err: err, - }, - RequestID: reqID, - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go deleted file mode 100644 index 993929bd9b7a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go +++ /dev/null @@ -1,104 +0,0 @@ -package http - -import ( - "context" - "fmt" - "io" - "time" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type readResult struct { - n int - err error -} - -// ResponseTimeoutError is an error when the reads from the response are -// delayed longer than the timeout the read was configured for. -type ResponseTimeoutError struct { - TimeoutDur time.Duration -} - -// Timeout returns that the error is was caused by a timeout, and can be -// retried. -func (*ResponseTimeoutError) Timeout() bool { return true } - -func (e *ResponseTimeoutError) Error() string { - return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur) -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. -type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, &ResponseTimeoutError{TimeoutDur: r.duration} - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the -// response body so that a read that takes too long will return an error. -func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error { - return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After) -} - -// readTimeout wraps the response body with a timeoutReadCloser -type readTimeout struct { - duration time.Duration -} - -// ID returns the id of the middleware -func (*readTimeout) ID() string { - return "ReadResponseTimeout" -} - -// HandleDeserialize implements the DeserializeMiddleware interface -func (m *readTimeout) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - response.Body = &timeoutReadCloser{ - reader: response.Body, - duration: m.duration, - } - out.RawResponse = response - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go deleted file mode 100644 index cc3ae811402d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go +++ /dev/null @@ -1,42 +0,0 @@ -package aws - -import ( - "fmt" -) - -// Ternary is an enum allowing an unknown or none state in addition to a bool's -// true and false. -type Ternary int - -func (t Ternary) String() string { - switch t { - case UnknownTernary: - return "unknown" - case FalseTernary: - return "false" - case TrueTernary: - return "true" - default: - return fmt.Sprintf("unknown value, %d", int(t)) - } -} - -// Bool returns true if the value is TrueTernary, false otherwise. -func (t Ternary) Bool() bool { - return t == TrueTernary -} - -// Enumerations for the values of the Ternary type. -const ( - UnknownTernary Ternary = iota - FalseTernary - TrueTernary -) - -// BoolTernary returns a true or false Ternary value for the bool provided. -func BoolTernary(v bool) Ternary { - if v { - return TrueTernary - } - return FalseTernary -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go deleted file mode 100644 index 5f729d45e1cd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go-v2" - -// SDKVersion is the version of this SDK -const SDKVersion = goModuleVersion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md deleted file mode 100644 index 7c5a87fbe7c0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ /dev/null @@ -1,908 +0,0 @@ -# v1.31.3 (2025-08-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.2 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.1 (2025-08-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.0 (2025-08-11) - -* **Feature**: Add support for configuring per-service Options via callback on global config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.3 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.2 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.1 (2025-07-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.18 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.17 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.16 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.15 (2025-06-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.14 (2025-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.13 (2025-04-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.12 (2025-03-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.11 (2025-03-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.10 (2025-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.9 (2025-03-04.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.8 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.7 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.6 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.5 (2025-02-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.4 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.3 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.2 (2025-01-24) - -* **Bug Fix**: Fix env config naming and usage of deprecated ioutil -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.29.1 (2025-01-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.0 (2025-01-15) - -* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.11 (2025-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.10 (2025-01-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.9 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.8 (2025-01-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.7 (2024-12-19) - -* **Bug Fix**: Fix improper use of printf-style functions. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.4 (2024-11-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.3 (2024-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.2 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2024-10-16) - -* **Feature**: Adds the LoadOptions hook `WithBaseEndpoint` for setting global endpoint override in-code. - -# v1.27.43 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.42 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.41 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.40 (2024-10-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.39 (2024-09-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.38 (2024-09-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.37 (2024-09-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.36 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.35 (2024-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.34 (2024-09-16) - -* **Bug Fix**: Read `AWS_CONTAINER_CREDENTIALS_FULL_URI` env variable if set when reading a profile with `credential_source`. Also ensure `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` is always read before it - -# v1.27.33 (2024-09-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.32 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.31 (2024-08-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.30 (2024-08-23) - -* **Bug Fix**: Don't fail credentials unit tests if credentials are found on a file - -# v1.27.29 (2024-08-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.28 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.27 (2024-07-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.26 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.25 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.24 (2024-07-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.23 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.22 (2024-06-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.21 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.20 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.19 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.18 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.17 (2024-06-03) - -* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.16 (2024-05-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.15 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.14 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.13 (2024-05-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.12 (2024-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.11 (2024-04-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.10 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.9 (2024-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.8 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.7 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.6 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.5 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.4 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.3 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2024-01-22) - -* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.5 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.4 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.2 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2023-12-08) - -* **Bug Fix**: Correct loading of [services *] sections into shared config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2023-12-07) - -* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.12 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.11 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.10 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.9 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.8 (2023-11-28.3) - -* **Bug Fix**: Correct resolution of S3Express auth disable toggle. - -# v1.25.7 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.6 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.5 (2023-11-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.3 (2023-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.2 (2023-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.1 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2023-11-14) - -* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2023-11-13) - -* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2023-11-09.2) - -* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.3 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.2 (2023-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2023-11-06) - -* No change notes available for this release. - -# v1.22.0 (2023-11-02) - -* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2023-10-24) - -* No change notes available for this release. - -# v1.19.0 (2023-10-16) - -* **Feature**: Modify logic of retrieving user agent appID from env config - -# v1.18.45 (2023-10-12) - -* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.44 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.43 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.42 (2023-09-22) - -* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. -* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.41 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.40 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.39 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.38 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.37 (2023-08-23) - -* No change notes available for this release. - -# v1.18.36 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.35 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.34 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.33 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.32 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.29 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.28 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.27 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.26 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.25 (2023-05-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.24 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.23 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.22 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.21 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.20 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.19 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.18 (2023-03-16) - -* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. - -# v1.18.17 (2023-03-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.16 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.15 (2023-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.14 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.13 (2023-02-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.12 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.11 (2023-02-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.10 (2023-01-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.9 (2023-01-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.8 (2023-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2022-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2022-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2022-12-15) - -* **Bug Fix**: Unify logic between shared config and in finding home directory -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.4 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2022-11-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.2 (2022-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2022-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2022-11-11) - -* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 -* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2022-11-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2022-09-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2022-08-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2022-08-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2022-08-14) - -* **Feature**: Add alternative mechanism for determning the users `$HOME` or `%USERPROFILE%` location when the environment variables are not present. - -# v1.16.1 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2022-08-10) - -* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`. - -# v1.15.17 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.16 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.15 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.14 (2022-07-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.13 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.12 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.11 (2022-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.10 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.9 (2022-05-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.8 (2022-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.7 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.6 (2022-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.5 (2022-05-09) - -* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682) - -# v1.15.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2022-02-24) - -* **Feature**: Adds support for loading RetryMaxAttempts and RetryMod from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if custome retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about and how to use these new options. -* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589) -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-01-28) - -* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR. -* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. -* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-01-07) - -* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2021-12-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-12-02) - -* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.3 (2021-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.2 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.1 (2021-11-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.3 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-09-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-09-02) - -* **Feature**: Add support for S3 Multi-Region Access Point ARNs. - -# v1.7.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-07-15) - -* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-07-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-06-25) - -* **Feature**: Adds configuration setting for enabling endpoint discovery. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-05-20) - -* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. -* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/auth_scheme_preference.go b/vendor/github.com/aws/aws-sdk-go-v2/config/auth_scheme_preference.go deleted file mode 100644 index 99e1236614d0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/auth_scheme_preference.go +++ /dev/null @@ -1,19 +0,0 @@ -package config - -import "strings" - -func toAuthSchemePreferenceList(cfg string) []string { - if len(cfg) == 0 { - return nil - } - parts := strings.Split(cfg, ",") - ids := make([]string, 0, len(parts)) - - for _, p := range parts { - if id := strings.TrimSpace(p); len(id) > 0 { - ids = append(ids, id) - } - } - - return ids -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go deleted file mode 100644 index caa20a158aa4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ /dev/null @@ -1,235 +0,0 @@ -package config - -import ( - "context" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// defaultAWSConfigResolvers are a slice of functions that will resolve external -// configuration values into AWS configuration values. -// -// This will setup the AWS configuration's Region, -var defaultAWSConfigResolvers = []awsConfigResolver{ - // Resolves the default configuration the SDK's aws.Config will be - // initialized with. - resolveDefaultAWSConfig, - - // Sets the logger to be used. Could be user provided logger, and client - // logging mode. - resolveLogger, - resolveClientLogMode, - - // Sets the HTTP client and configuration to use for making requests using - // the HTTP transport. - resolveHTTPClient, - resolveCustomCABundle, - - // Sets the endpoint resolving behavior the API Clients will use for making - // requests to. Clients default to their own clients this allows overrides - // to be specified. The resolveEndpointResolver option is deprecated, but - // we still need to set it for backwards compatibility on config - // construction. - resolveEndpointResolver, - resolveEndpointResolverWithOptions, - - // Sets the retry behavior API clients will use within their retry attempt - // middleware. Defaults to unset, allowing API clients to define their own - // retry behavior. - resolveRetryer, - - // Sets the region the API Clients should use for making requests to. - resolveRegion, - resolveEC2IMDSRegion, - resolveDefaultRegion, - - // Sets the additional set of middleware stack mutators that will custom - // API client request pipeline middleware. - resolveAPIOptions, - - // Resolves the DefaultsMode that should be used by SDK clients. If this - // mode is set to DefaultsModeAuto. - // - // Comes after HTTPClient and CustomCABundle to ensure the HTTP client is - // configured if provided before invoking IMDS if mode is auto. Comes - // before resolving credentials so that those subsequent clients use the - // configured auto mode. - resolveDefaultsModeOptions, - - // Sets the resolved credentials the API clients will use for - // authentication. Provides the SDK's default credential chain. - // - // Should probably be the last step in the resolve chain to ensure that all - // other configurations are resolved first in case downstream credentials - // implementations depend on or can be configured with earlier resolved - // configuration options. - resolveCredentials, - - // Sets the resolved bearer authentication token API clients will use for - // httpBearerAuth authentication scheme. - resolveBearerAuthToken, - - // Sets the sdk app ID if present in env var or shared config profile - resolveAppID, - - resolveBaseEndpoint, - - // Sets the DisableRequestCompression if present in env var or shared config profile - resolveDisableRequestCompression, - - // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile - resolveRequestMinCompressSizeBytes, - - // Sets the AccountIDEndpointMode if present in env var or shared config profile - resolveAccountIDEndpointMode, - - // Sets the RequestChecksumCalculation if present in env var or shared config profile - resolveRequestChecksumCalculation, - - // Sets the ResponseChecksumValidation if present in env var or shared config profile - resolveResponseChecksumValidation, - - resolveInterceptors, - - resolveAuthSchemePreference, - - // Sets the ServiceOptions if present in LoadOptions - resolveServiceOptions, -} - -// A Config represents a generic configuration value or set of values. This type -// will be used by the AWSConfigResolvers to extract -// -// General the Config type will use type assertion against the Provider interfaces -// to extract specific data from the Config. -type Config interface{} - -// A loader is used to load external configuration data and returns it as -// a generic Config type. -// -// The loader should return an error if it fails to load the external configuration -// or the configuration data is malformed, or required components missing. -type loader func(context.Context, configs) (Config, error) - -// An awsConfigResolver will extract configuration data from the configs slice -// using the provider interfaces to extract specific functionality. The extracted -// configuration values will be written to the AWS Config value. -// -// The resolver should return an error if it it fails to extract the data, the -// data is malformed, or incomplete. -type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error - -// configs is a slice of Config values. These values will be used by the -// AWSConfigResolvers to extract external configuration values to populate the -// AWS Config type. -// -// Use AppendFromLoaders to add additional external Config values that are -// loaded from external sources. -// -// Use ResolveAWSConfig after external Config values have been added or loaded -// to extract the loaded configuration values into the AWS Config. -type configs []Config - -// AppendFromLoaders iterates over the slice of loaders passed in calling each -// loader function in order. The external config value returned by the loader -// will be added to the returned configs slice. -// -// If a loader returns an error this method will stop iterating and return -// that error. -func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) { - for _, fn := range loaders { - cfg, err := fn(ctx, cs) - if err != nil { - return nil, err - } - - cs = append(cs, cfg) - } - - return cs, nil -} - -// ResolveAWSConfig returns a AWS configuration populated with values by calling -// the resolvers slice passed in. Each resolver is called in order. Any resolver -// may overwrite the AWS Configuration value of a previous resolver. -// -// If an resolver returns an error this method will return that error, and stop -// iterating over the resolvers. -func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) { - var cfg aws.Config - - for _, fn := range resolvers { - if err := fn(ctx, &cfg, cs); err != nil { - return aws.Config{}, err - } - } - - return cfg, nil -} - -// ResolveConfig calls the provide function passing slice of configuration sources. -// This implements the aws.ConfigResolver interface. -func (cs configs) ResolveConfig(f func(configs []interface{}) error) error { - var cfgs []interface{} - for i := range cs { - cfgs = append(cfgs, cs[i]) - } - return f(cfgs) -} - -// LoadDefaultConfig reads the SDK's default external configurations, and -// populates an AWS Config with the values from the external configurations. -// -// An optional variadic set of additional Config values can be provided as input -// that will be prepended to the configs slice. Use this to add custom configuration. -// The custom configurations must satisfy the respective providers for their data -// or the custom data will be ignored by the resolvers and config loaders. -// -// cfg, err := config.LoadDefaultConfig( context.TODO(), -// config.WithSharedConfigProfile("test-profile"), -// ) -// if err != nil { -// panic(fmt.Sprintf("failed loading config, %v", err)) -// } -// -// The default configuration sources are: -// * Environment Variables -// * Shared Configuration and Shared Credentials files. -func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) { - var options LoadOptions - for _, optFn := range optFns { - if err := optFn(&options); err != nil { - return aws.Config{}, err - } - } - - // assign Load Options to configs - var cfgCpy = configs{options} - - cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options)) - if err != nil { - return aws.Config{}, err - } - - cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers) - if err != nil { - return aws.Config{}, err - } - - return cfg, nil -} - -func resolveConfigLoaders(options *LoadOptions) []loader { - loaders := make([]loader, 2) - loaders[0] = loadEnvConfig - - // specification of a profile should cause a load failure if it doesn't exist - if os.Getenv(awsProfileEnv) != "" || options.SharedConfigProfile != "" { - loaders[1] = loadSharedConfig - } else { - loaders[1] = loadSharedConfigIgnoreNotExist - } - - return loaders -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go deleted file mode 100644 index 20b66367ffd2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go +++ /dev/null @@ -1,47 +0,0 @@ -package config - -import ( - "context" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" -) - -const execEnvVar = "AWS_EXECUTION_ENV" - -// DefaultsModeOptions is the set of options that are used to configure -type DefaultsModeOptions struct { - // The SDK configuration defaults mode. Defaults to legacy if not specified. - // - // Supported modes are: auto, cross-region, in-region, legacy, mobile, standard - Mode aws.DefaultsMode - - // The EC2 Instance Metadata Client that should be used when performing environment - // discovery when aws.DefaultsModeAuto is set. - // - // If not specified the SDK will construct a client if the instance metadata service has not been disabled by - // the AWS_EC2_METADATA_DISABLED environment variable. - IMDSClient *imds.Client -} - -func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) { - getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{}) - // honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection. - select { - case <-ctx.Done(): - return aws.RuntimeEnvironment{}, err - default: - } - - var imdsRegion string - if err == nil { - imdsRegion = getRegionOutput.Region - } - - return aws.RuntimeEnvironment{ - EnvironmentIdentifier: aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)), - Region: envConfig.Region, - EC2InstanceMetadataRegion: imdsRegion, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go deleted file mode 100644 index aab7164e2835..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package config provides utilities for loading configuration from multiple -// sources that can be used to configure the SDK's API clients, and utilities. -// -// The config package will load configuration from environment variables, AWS -// shared configuration file (~/.aws/config), and AWS shared credentials file -// (~/.aws/credentials). -// -// Use the LoadDefaultConfig to load configuration from all the SDK's supported -// sources, and resolve credentials using the SDK's default credential chain. -// -// LoadDefaultConfig allows for a variadic list of additional Config sources that can -// provide one or more configuration values which can be used to programmatically control the resolution -// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK. -// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will -// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources -// implement the same provider interface, priority will be handled by the order in which the sources were passed in. -// -// A number of helpers (prefixed by “With“) are provided in this package that implement their respective provider -// interface. These helpers should be used for overriding configuration programmatically at runtime. -package config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go deleted file mode 100644 index e932c63dfb4a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ /dev/null @@ -1,932 +0,0 @@ -package config - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" -) - -// CredentialsSourceName provides a name of the provider when config is -// loaded from environment. -const CredentialsSourceName = "EnvConfigCredentials" - -// Environment variables that will be read for configuration values. -const ( - awsAccessKeyIDEnv = "AWS_ACCESS_KEY_ID" - awsAccessKeyEnv = "AWS_ACCESS_KEY" - - awsSecretAccessKeyEnv = "AWS_SECRET_ACCESS_KEY" - awsSecretKeyEnv = "AWS_SECRET_KEY" - - awsSessionTokenEnv = "AWS_SESSION_TOKEN" - - awsContainerCredentialsFullURIEnv = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - awsContainerCredentialsRelativeURIEnv = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" - awsContainerAuthorizationTokenEnv = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - - awsRegionEnv = "AWS_REGION" - awsDefaultRegionEnv = "AWS_DEFAULT_REGION" - - awsProfileEnv = "AWS_PROFILE" - awsDefaultProfileEnv = "AWS_DEFAULT_PROFILE" - - awsSharedCredentialsFileEnv = "AWS_SHARED_CREDENTIALS_FILE" - - awsConfigFileEnv = "AWS_CONFIG_FILE" - - awsCABundleEnv = "AWS_CA_BUNDLE" - - awsWebIdentityTokenFileEnv = "AWS_WEB_IDENTITY_TOKEN_FILE" - - awsRoleARNEnv = "AWS_ROLE_ARN" - awsRoleSessionNameEnv = "AWS_ROLE_SESSION_NAME" - - awsEnableEndpointDiscoveryEnv = "AWS_ENABLE_ENDPOINT_DISCOVERY" - - awsS3UseARNRegionEnv = "AWS_S3_USE_ARN_REGION" - - awsEc2MetadataServiceEndpointModeEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" - - awsEc2MetadataServiceEndpointEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - - awsEc2MetadataDisabledEnv = "AWS_EC2_METADATA_DISABLED" - awsEc2MetadataV1DisabledEnv = "AWS_EC2_METADATA_V1_DISABLED" - - awsS3DisableMultiRegionAccessPointsEnv = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" - - awsUseDualStackEndpointEnv = "AWS_USE_DUALSTACK_ENDPOINT" - - awsUseFIPSEndpointEnv = "AWS_USE_FIPS_ENDPOINT" - - awsDefaultsModeEnv = "AWS_DEFAULTS_MODE" - - awsMaxAttemptsEnv = "AWS_MAX_ATTEMPTS" - awsRetryModeEnv = "AWS_RETRY_MODE" - awsSdkUaAppIDEnv = "AWS_SDK_UA_APP_ID" - - awsIgnoreConfiguredEndpointURLEnv = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" - awsEndpointURLEnv = "AWS_ENDPOINT_URL" - - awsDisableRequestCompressionEnv = "AWS_DISABLE_REQUEST_COMPRESSION" - awsRequestMinCompressionSizeBytesEnv = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" - - awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" - - awsAccountIDEnv = "AWS_ACCOUNT_ID" - awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" - - awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION" - awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION" - - awsAuthSchemePreferenceEnv = "AWS_AUTH_SCHEME_PREFERENCE" -) - -var ( - credAccessEnvKeys = []string{ - awsAccessKeyIDEnv, - awsAccessKeyEnv, - } - credSecretEnvKeys = []string{ - awsSecretAccessKeyEnv, - awsSecretKeyEnv, - } - regionEnvKeys = []string{ - awsRegionEnv, - awsDefaultRegionEnv, - } - profileEnvKeys = []string{ - awsProfileEnv, - awsDefaultProfileEnv, - } -) - -// EnvConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type EnvConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Credentials aws.Credentials - - // ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials - // using the endpointcreds.Provider - ContainerCredentialsEndpoint string - - // ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve - // credentials from the container endpoint. - ContainerCredentialsRelativePath string - - // ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization - // header when attempting to retrieve credentials from the container credentials endpoint. - ContainerAuthorizationToken string - - // Region value will instruct the SDK where to make service API requests to. If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-west-2 - // AWS_DEFAULT_REGION=us-west-2 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // AWS_DEFAULT_PROFILE=my_profile - SharedConfigProfile string - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. - // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the config. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string - - // Enables endpoint discovery via environment variables. - // - // AWS_ENABLE_ENDPOINT_DISCOVERY=true - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies the WebIdentity token the SDK should use to assume a role - // with. - // - // AWS_WEB_IDENTITY_TOKEN_FILE=file_path - WebIdentityTokenFilePath string - - // Specifies the IAM role arn to use when assuming an role. - // - // AWS_ROLE_ARN=role_arn - RoleARN string - - // Specifies the IAM role session name to use when assuming a role. - // - // AWS_ROLE_SESSION_NAME=session_name - RoleSessionName string - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // AWS_S3_USE_ARN_REGION=true - S3UseARNRegion *bool - - // Specifies if the EC2 IMDS service client is enabled. - // - // AWS_EC2_METADATA_DISABLED=true - EC2IMDSClientEnableState imds.ClientEnableState - - // Specifies if EC2 IMDSv1 fallback is disabled. - // - // AWS_EC2_METADATA_V1_DISABLED=true - EC2IMDSv1Disabled *bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254 - EC2IMDSEndpoint string - - // Specifies if the S3 service should disable multi-region access points - // support. - // - // AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true - S3DisableMultiRegionAccessPoints *bool - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // AWS_USE_DUALSTACK_ENDPOINT=true - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // AWS_USE_FIPS_ENDPOINT=true - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies the SDK Defaults Mode used by services. - // - // AWS_DEFAULTS_MODE=standard - DefaultsMode aws.DefaultsMode - - // Specifies the maximum number attempts an API client will call an - // operation that fails with a retryable error. - // - // AWS_MAX_ATTEMPTS=3 - RetryMaxAttempts int - - // Specifies the retry model the API client will be created with. - // - // aws_retry_mode=standard - RetryMode aws.RetryMode - - // aws sdk app ID that can be added to user agent header string - AppID string - - // Flag used to disable configured endpoints. - IgnoreConfiguredEndpoints *bool - - // Value to contain configured endpoints to be propagated to - // corresponding endpoint resolution field. - BaseEndpoint string - - // determine if request compression is allowed, default to false - // retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION - DisableRequestCompression *bool - - // inclusive threshold request body size to trigger compression, - // default to 10240 and must be within 0 and 10485760 bytes inclusive - // retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES - RequestMinCompressSizeBytes *int64 - - // Whether S3Express auth is disabled. - // - // This will NOT prevent requests from being made to S3Express buckets, it - // will only bypass the modified endpoint routing and signing behaviors - // associated with the feature. - S3DisableExpressAuth *bool - - // Indicates whether account ID will be required/ignored in endpoint2.0 routing - AccountIDEndpointMode aws.AccountIDEndpointMode - - // Indicates whether request checksum should be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Indicates whether response checksum should be validated - ResponseChecksumValidation aws.ResponseChecksumValidation - - // Priority list of preferred auth scheme names (e.g. sigv4a). - AuthSchemePreference []string -} - -// loadEnvConfig reads configuration values from the OS's environment variables. -// Returning the a Config typed EnvConfig to satisfy the ConfigLoader func type. -func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) { - return NewEnvConfig() -} - -// NewEnvConfig retrieves the SDK's environment configuration. -// See `EnvConfig` for the values that will be retrieved. -func NewEnvConfig() (EnvConfig, error) { - var cfg EnvConfig - - creds := aws.Credentials{ - Source: CredentialsSourceName, - } - setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) - setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) - if creds.HasKeys() { - creds.AccountID = os.Getenv(awsAccountIDEnv) - creds.SessionToken = os.Getenv(awsSessionTokenEnv) - cfg.Credentials = creds - } - - cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsFullURIEnv) - cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativeURIEnv) - cfg.ContainerAuthorizationToken = os.Getenv(awsContainerAuthorizationTokenEnv) - - setStringFromEnvVal(&cfg.Region, regionEnvKeys) - setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) - - cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnv) - cfg.SharedConfigFile = os.Getenv(awsConfigFileEnv) - - cfg.CustomCABundle = os.Getenv(awsCABundleEnv) - - cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFileEnv) - - cfg.RoleARN = os.Getenv(awsRoleARNEnv) - cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnv) - - cfg.AppID = os.Getenv(awsSdkUaAppIDEnv) - - if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompressionEnv}); err != nil { - return cfg, err - } - if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytesEnv}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { - return cfg, err - } - - if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnv}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnv}); err != nil { - return cfg, err - } - - setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabledEnv}) - if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnv}); err != nil { - return cfg, err - } - cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnv) - if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnv}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointsEnv}); err != nil { - return cfg, err - } - - if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpointEnv}); err != nil { - return cfg, err - } - - if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpointEnv}); err != nil { - return cfg, err - } - - if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultsModeEnv}); err != nil { - return cfg, err - } - - if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsMaxAttemptsEnv}); err != nil { - return cfg, err - } - if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryModeEnv}); err != nil { - return cfg, err - } - - setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURLEnv}) - - if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpointURLEnv}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil { - return cfg, err - } - - if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil { - return cfg, err - } - - if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil { - return cfg, err - } - if err := setResponseChecksumValidationFromEnvVal(&cfg.ResponseChecksumValidation, []string{awsResponseChecksumValidation}); err != nil { - return cfg, err - } - - cfg.AuthSchemePreference = toAuthSchemePreferenceList(os.Getenv(awsAuthSchemePreferenceEnv)) - - return cfg, nil -} - -func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { - if len(c.DefaultsMode) == 0 { - return "", false, nil - } - return c.DefaultsMode, true, nil -} - -func (c EnvConfig) getAppID(context.Context) (string, bool, error) { - return c.AppID, len(c.AppID) > 0, nil -} - -func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) { - if c.DisableRequestCompression == nil { - return false, false, nil - } - return *c.DisableRequestCompression, true, nil -} - -func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) { - if c.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *c.RequestMinCompressSizeBytes, true, nil -} - -func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) { - return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil -} - -func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) { - return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil -} - -func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) { - return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil -} - -// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, -// and not 0. -func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if c.RetryMaxAttempts == 0 { - return 0, false, nil - } - return c.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if was specified, and a -// valid value. -func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if len(c.RetryMode) == 0 { - return "", false, nil - } - return c.RetryMode, true, nil -} - -func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - switch { - case strings.EqualFold(value, "true"): - *state = imds.ClientDisabled - case strings.EqualFold(value, "false"): - *state = imds.ClientEnabled - default: - continue - } - break - } -} - -func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error { - for _, k := range keys { - if value := os.Getenv(k); len(value) > 0 { - if ok := mode.SetFromString(value); !ok { - return fmt.Errorf("invalid %s value: %s", k, value) - } - break - } - } - return nil -} - -func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) { - for _, k := range keys { - if value := os.Getenv(k); len(value) > 0 { - *mode, err = aws.ParseRetryMode(value) - if err != nil { - return fmt.Errorf("invalid %s value, %w", k, err) - } - break - } - } - return nil -} - -func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - if err := mode.SetFromString(value); err != nil { - return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) - } - } - return nil -} - -func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - switch value { - case "preferred": - *m = aws.AccountIDEndpointModePreferred - case "required": - *m = aws.AccountIDEndpointModeRequired - case "disabled": - *m = aws.AccountIDEndpointModeDisabled - default: - return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value) - } - break - } - return nil -} - -func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - switch strings.ToLower(value) { - case checksumWhenSupported: - *m = aws.RequestChecksumCalculationWhenSupported - case checksumWhenRequired: - *m = aws.RequestChecksumCalculationWhenRequired - default: - return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) - } - } - return nil -} - -func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - switch strings.ToLower(value) { - case checksumWhenSupported: - *m = aws.ResponseChecksumValidationWhenSupported - case checksumWhenRequired: - *m = aws.ResponseChecksumValidationWhenRequired - default: - return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) - } - - } - return nil -} - -// GetRegion returns the AWS Region if set in the environment. Returns an empty -// string if not set. -func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { - if len(c.Region) == 0 { - return "", false, nil - } - return c.Region, true, nil -} - -// GetSharedConfigProfile returns the shared config profile if set in the -// environment. Returns an empty string if not set. -func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) { - if len(c.SharedConfigProfile) == 0 { - return "", false, nil - } - - return c.SharedConfigProfile, true, nil -} - -// getSharedConfigFiles returns a slice of filenames set in the environment. -// -// Will return the filenames in the order of: -// * Shared Config -func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) { - var files []string - if v := c.SharedConfigFile; len(v) > 0 { - files = append(files, v) - } - - if len(files) == 0 { - return nil, false, nil - } - return files, true, nil -} - -// getSharedCredentialsFiles returns a slice of filenames set in the environment. -// -// Will return the filenames in the order of: -// * Shared Credentials -func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) { - var files []string - if v := c.SharedCredentialsFile; len(v) > 0 { - files = append(files, v) - } - if len(files) == 0 { - return nil, false, nil - } - return files, true, nil -} - -// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was -func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { - if len(c.CustomCABundle) == 0 { - return nil, false, nil - } - - b, err := os.ReadFile(c.CustomCABundle) - if err != nil { - return nil, false, err - } - return bytes.NewReader(b), true, nil -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { - if c.IgnoreConfiguredEndpoints == nil { - return false, false, nil - } - - return *c.IgnoreConfiguredEndpoints, true, nil -} - -func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) { - return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil -} - -// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use -// with configured endpoints. -func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { - if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURLEnv, normalizeEnv(sdkID))); endpt != "" { - return endpt, true, nil - } - return "", false, nil -} - -func normalizeEnv(sdkID string) string { - upper := strings.ToUpper(sdkID) - return strings.ReplaceAll(upper, " ", "_") -} - -// GetS3UseARNRegion returns whether to allow ARNs to direct the region -// the S3 client's requests are sent to. -func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { - if c.S3UseARNRegion == nil { - return false, false, nil - } - - return *c.S3UseARNRegion, true, nil -} - -// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point -// support for the S3 client. -func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { - if c.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - - return *c.S3DisableMultiRegionAccessPoints, true, nil -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. -func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - - return c.UseDualStackEndpoint, true, nil -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - - return c.UseFIPSEndpoint, true, nil -} - -func setStringFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - *dst = v - break - } - } -} - -func setIntFromEnvVal(dst *int, keys []string) error { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("invalid value %s=%s, %w", k, v, err) - } - *dst = int(i) - break - } - } - - return nil -} - -func setBoolPtrFromEnvVal(dst **bool, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - if *dst == nil { - *dst = new(bool) - } - - switch { - case strings.EqualFold(value, "false"): - **dst = false - case strings.EqualFold(value, "true"): - **dst = true - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - k, value) - } - break - } - - return nil -} - -func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - v, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value) - } else if v < 0 || v > max { - return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v) - } - if *dst == nil { - *dst = new(int64) - } - - **dst = v - break - } - - return nil -} - -func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, endpointDiscoveryDisabled): - *dst = aws.EndpointDiscoveryDisabled - case strings.EqualFold(value, endpointDiscoveryEnabled): - *dst = aws.EndpointDiscoveryEnabled - case strings.EqualFold(value, endpointDiscoveryAuto): - *dst = aws.EndpointDiscoveryAuto - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false or auto", - k, value) - } - } - return nil -} - -func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, "true"): - *dst = aws.DualStackEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = aws.DualStackEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, "true"): - *dst = aws.FIPSEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = aws.FIPSEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting. -func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) { - if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - - return c.EnableEndpointDiscovery, true, nil -} - -// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. -func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { - if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { - return imds.ClientDefaultEnableState, false, nil - } - - return c.EC2IMDSClientEnableState, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return c.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { - if len(c.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return c.EC2IMDSEndpoint, true, nil -} - -// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option -// resolver interface. -func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { - if c.EC2IMDSv1Disabled == nil { - return false, false - } - - return *c.EC2IMDSv1Disabled, true -} - -// GetS3DisableExpressAuth returns the configured value for -// [EnvConfig.S3DisableExpressAuth]. -func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) { - if c.S3DisableExpressAuth == nil { - return false, false - } - - return *c.S3DisableExpressAuth, true -} - -func (c EnvConfig) getAuthSchemePreference() ([]string, bool) { - if len(c.AuthSchemePreference) > 0 { - return c.AuthSchemePreference, true - } - return nil, false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go deleted file mode 100644 index 654a7a77fb7e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package config - -//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go -//go:generate gofmt -s -w ./ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go deleted file mode 100644 index 0e8449a5f06b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package config - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.31.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go deleted file mode 100644 index 7cb5a136588a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ /dev/null @@ -1,1355 +0,0 @@ -package config - -import ( - "context" - "io" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// LoadOptionsFunc is a type alias for LoadOptions functional option -type LoadOptionsFunc func(*LoadOptions) error - -// LoadOptions are discrete set of options that are valid for loading the -// configuration -type LoadOptions struct { - - // Region is the region to send requests to. - Region string - - // Credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // Token provider for authentication operations with bearer authentication. - BearerAuthTokenProvider smithybearer.TokenProvider - - // HTTPClient the SDK's API clients will use to invoke HTTP requests. - HTTPClient HTTPClient - - // EndpointResolver that can be used to provide or override an endpoint for - // the given service and region. - // - // See the `aws.EndpointResolver` documentation on usage. - // - // Deprecated: See EndpointResolverWithOptions - EndpointResolver aws.EndpointResolver - - // EndpointResolverWithOptions that can be used to provide or override an - // endpoint for the given service and region. - // - // See the `aws.EndpointResolverWithOptions` documentation on usage. - EndpointResolverWithOptions aws.EndpointResolverWithOptions - - // RetryMaxAttempts specifies the maximum number attempts an API client - // will call an operation that fails with a retryable error. - // - // This value will only be used if Retryer option is nil. - RetryMaxAttempts int - - // RetryMode specifies the retry model the API client will be created with. - // - // This value will only be used if Retryer option is nil. - RetryMode aws.RetryMode - - // Retryer is a function that provides a Retryer implementation. A Retryer - // guides how HTTP requests should be retried in case of recoverable - // failures. - // - // If not nil, RetryMaxAttempts, and RetryMode will be ignored. - Retryer func() aws.Retryer - - // APIOptions provides the set of middleware mutations modify how the API - // client requests will be handled. This is useful for adding additional - // tracing data to a request, or changing behavior of the SDK's client. - APIOptions []func(*middleware.Stack) error - - // Logger writer interface to write logging messages to. - Logger logging.Logger - - // ClientLogMode is used to configure the events that will be sent to the - // configured logger. This can be used to configure the logging of signing, - // retries, request, and responses of the SDK clients. - // - // See the ClientLogMode type documentation for the complete set of logging - // modes and available configuration. - ClientLogMode *aws.ClientLogMode - - // SharedConfigProfile is the profile to be used when loading the SharedConfig - SharedConfigProfile string - - // SharedConfigFiles is the slice of custom shared config files to use when - // loading the SharedConfig. A non-default profile used within config file - // must have name defined with prefix 'profile '. eg [profile xyz] - // indicates a profile with name 'xyz'. To read more on the format of the - // config file, please refer the documentation at - // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config - // - // If duplicate profiles are provided within the same, or across multiple - // shared config files, the next parsed profile will override only the - // properties that conflict with the previously defined profile. Note that - // if duplicate profiles are provided within the SharedCredentialsFiles and - // SharedConfigFiles, the properties defined in shared credentials file - // take precedence. - SharedConfigFiles []string - - // SharedCredentialsFile is the slice of custom shared credentials files to - // use when loading the SharedConfig. The profile name used within - // credentials file must not prefix 'profile '. eg [xyz] indicates a - // profile with name 'xyz'. Profile declared as [profile xyz] will be - // ignored. To read more on the format of the credentials file, please - // refer the documentation at - // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds - // - // If duplicate profiles are provided with a same, or across multiple - // shared credentials files, the next parsed profile will override only - // properties that conflict with the previously defined profile. Note that - // if duplicate profiles are provided within the SharedCredentialsFiles and - // SharedConfigFiles, the properties defined in shared credentials file - // take precedence. - SharedCredentialsFiles []string - - // CustomCABundle is CA bundle PEM bytes reader - CustomCABundle io.Reader - - // DefaultRegion is the fall back region, used if a region was not resolved - // from other sources - DefaultRegion string - - // UseEC2IMDSRegion indicates if SDK should retrieve the region - // from the EC2 Metadata service - UseEC2IMDSRegion *UseEC2IMDSRegion - - // CredentialsCacheOptions is a function for setting the - // aws.CredentialsCacheOptions - CredentialsCacheOptions func(*aws.CredentialsCacheOptions) - - // BearerAuthTokenCacheOptions is a function for setting the smithy-go - // auth/bearer#TokenCacheOptions - BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions) - - // SSOTokenProviderOptions is a function for setting the - // credentials/ssocreds.SSOTokenProviderOptions - SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions) - - // ProcessCredentialOptions is a function for setting - // the processcreds.Options - ProcessCredentialOptions func(*processcreds.Options) - - // EC2RoleCredentialOptions is a function for setting - // the ec2rolecreds.Options - EC2RoleCredentialOptions func(*ec2rolecreds.Options) - - // EndpointCredentialOptions is a function for setting - // the endpointcreds.Options - EndpointCredentialOptions func(*endpointcreds.Options) - - // WebIdentityRoleCredentialOptions is a function for setting - // the stscreds.WebIdentityRoleOptions - WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions) - - // AssumeRoleCredentialOptions is a function for setting the - // stscreds.AssumeRoleOptions - AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions) - - // SSOProviderOptions is a function for setting - // the ssocreds.Options - SSOProviderOptions func(options *ssocreds.Options) - - // LogConfigurationWarnings when set to true, enables logging - // configuration warnings - LogConfigurationWarnings *bool - - // S3UseARNRegion specifies if the S3 service should allow ARNs to direct - // the region, the client's requests are sent to. - S3UseARNRegion *bool - - // S3DisableMultiRegionAccessPoints specifies if the S3 service should disable - // the S3 Multi-Region access points feature. - S3DisableMultiRegionAccessPoints *bool - - // EnableEndpointDiscovery specifies if endpoint discovery is enable for - // the client. - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies if the EC2 IMDS service client is enabled. - // - // AWS_EC2_METADATA_DISABLED=true - EC2IMDSClientEnableState imds.ClientEnableState - - // Specifies the EC2 Instance Metadata Service default endpoint selection - // mode (IPv4 or IPv6) - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If - // specified it overrides EC2IMDSEndpointMode. - EC2IMDSEndpoint string - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies the SDK configuration mode for defaults. - DefaultsModeOptions DefaultsModeOptions - - // The sdk app ID retrieved from env var or shared config to be added to request user agent header - AppID string - - // Specifies whether an operation request could be compressed - DisableRequestCompression *bool - - // The inclusive min bytes of a request body that could be compressed - RequestMinCompressSizeBytes *int64 - - // Whether S3 Express auth is disabled. - S3DisableExpressAuth *bool - - // Whether account id should be built into endpoint resolution - AccountIDEndpointMode aws.AccountIDEndpointMode - - // Specify if request checksum should be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Specifies if response checksum should be validated - ResponseChecksumValidation aws.ResponseChecksumValidation - - // Service endpoint override. This value is not necessarily final and is - // passed to the service's EndpointResolverV2 for further delegation. - BaseEndpoint string - - // Registry of operation interceptors. - Interceptors smithyhttp.InterceptorRegistry - - // Priority list of preferred auth scheme names (e.g. sigv4a). - AuthSchemePreference []string - - // ServiceOptions provides service specific configuration options that will be applied - // when constructing clients for specific services. Each callback function receives the service ID - // and the service's Options struct, allowing for dynamic configuration based on the service. - ServiceOptions []func(string, any) -} - -func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { - if len(o.DefaultsModeOptions.Mode) == 0 { - return "", false, nil - } - return o.DefaultsModeOptions.Mode, true, nil -} - -// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the -// LoadOptions and not 0. -func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if o.RetryMaxAttempts == 0 { - return 0, false, nil - } - return o.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the RetryMode specified in the LoadOptions. -func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if len(o.RetryMode) == 0 { - return "", false, nil - } - return o.RetryMode, true, nil -} - -func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) { - if o.DefaultsModeOptions.IMDSClient == nil { - return nil, false, nil - } - return o.DefaultsModeOptions.IMDSClient, true, nil -} - -// getRegion returns Region from config's LoadOptions -func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) { - if len(o.Region) == 0 { - return "", false, nil - } - - return o.Region, true, nil -} - -// getAppID returns AppID from config's LoadOptions -func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) { - return o.AppID, len(o.AppID) > 0, nil -} - -// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions -func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { - if o.DisableRequestCompression == nil { - return false, false, nil - } - return *o.DisableRequestCompression, true, nil -} - -// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions -func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { - if o.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *o.RequestMinCompressSizeBytes, true, nil -} - -func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { - return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil -} - -func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { - return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil -} - -func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { - return o.ResponseChecksumValidation, o.ResponseChecksumValidation > 0, nil -} - -func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) { - return o.BaseEndpoint, o.BaseEndpoint != "", nil -} - -func (o LoadOptions) getServiceOptions(context.Context) ([]func(string, any), bool, error) { - return o.ServiceOptions, len(o.ServiceOptions) > 0, nil -} - -// GetServiceBaseEndpoint satisfies (internal/configsources).ServiceBaseEndpointProvider. -// -// The sdkID value is unused because LoadOptions only supports setting a GLOBAL -// endpoint override. In-code, per-service endpoint overrides are performed via -// functional options in service client space. -func (o LoadOptions) GetServiceBaseEndpoint(context.Context, string) (string, bool, error) { - return o.BaseEndpoint, o.BaseEndpoint != "", nil -} - -// WithRegion is a helper function to construct functional options -// that sets Region on config's LoadOptions. Setting the region to -// an empty string, will result in the region value being ignored. -// If multiple WithRegion calls are made, the last call overrides -// the previous call values. -func WithRegion(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Region = v - return nil - } -} - -// WithAppID is a helper function to construct functional options -// that sets AppID on config's LoadOptions. -func WithAppID(ID string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.AppID = ID - return nil - } -} - -// WithDisableRequestCompression is a helper function to construct functional options -// that sets DisableRequestCompression on config's LoadOptions. -func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - if DisableRequestCompression == nil { - return nil - } - o.DisableRequestCompression = DisableRequestCompression - return nil - } -} - -// WithRequestMinCompressSizeBytes is a helper function to construct functional options -// that sets RequestMinCompressSizeBytes on config's LoadOptions. -func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc { - return func(o *LoadOptions) error { - if RequestMinCompressSizeBytes == nil { - return nil - } - o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes - return nil - } -} - -// WithAccountIDEndpointMode is a helper function to construct functional options -// that sets AccountIDEndpointMode on config's LoadOptions -func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { - return func(o *LoadOptions) error { - if m != "" { - o.AccountIDEndpointMode = m - } - return nil - } -} - -// WithRequestChecksumCalculation is a helper function to construct functional options -// that sets RequestChecksumCalculation on config's LoadOptions -func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc { - return func(o *LoadOptions) error { - if c > 0 { - o.RequestChecksumCalculation = c - } - return nil - } -} - -// WithResponseChecksumValidation is a helper function to construct functional options -// that sets ResponseChecksumValidation on config's LoadOptions -func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ResponseChecksumValidation = v - return nil - } -} - -// getDefaultRegion returns DefaultRegion from config's LoadOptions -func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { - if len(o.DefaultRegion) == 0 { - return "", false, nil - } - - return o.DefaultRegion, true, nil -} - -// WithDefaultRegion is a helper function to construct functional options -// that sets a DefaultRegion on config's LoadOptions. Setting the default -// region to an empty string, will result in the default region value -// being ignored. If multiple WithDefaultRegion calls are made, the last -// call overrides the previous call values. Note that both WithRegion and -// WithEC2IMDSRegion call takes precedence over WithDefaultRegion call -// when resolving region. -func WithDefaultRegion(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.DefaultRegion = v - return nil - } -} - -// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions -func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) { - if len(o.SharedConfigProfile) == 0 { - return "", false, nil - } - - return o.SharedConfigProfile, true, nil -} - -// WithSharedConfigProfile is a helper function to construct functional options -// that sets SharedConfigProfile on config's LoadOptions. Setting the shared -// config profile to an empty string, will result in the shared config profile -// value being ignored. -// If multiple WithSharedConfigProfile calls are made, the last call overrides -// the previous call values. -func WithSharedConfigProfile(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedConfigProfile = v - return nil - } -} - -// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions -func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) { - if o.SharedConfigFiles == nil { - return nil, false, nil - } - - return o.SharedConfigFiles, true, nil -} - -// WithSharedConfigFiles is a helper function to construct functional options -// that sets slice of SharedConfigFiles on config's LoadOptions. -// Setting the shared config files to an nil string slice, will result in the -// shared config files value being ignored. -// If multiple WithSharedConfigFiles calls are made, the last call overrides -// the previous call values. -func WithSharedConfigFiles(v []string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedConfigFiles = v - return nil - } -} - -// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions -func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) { - if o.SharedCredentialsFiles == nil { - return nil, false, nil - } - - return o.SharedCredentialsFiles, true, nil -} - -// WithSharedCredentialsFiles is a helper function to construct functional options -// that sets slice of SharedCredentialsFiles on config's LoadOptions. -// Setting the shared credentials files to an nil string slice, will result in the -// shared credentials files value being ignored. -// If multiple WithSharedCredentialsFiles calls are made, the last call overrides -// the previous call values. -func WithSharedCredentialsFiles(v []string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedCredentialsFiles = v - return nil - } -} - -// getCustomCABundle returns CustomCABundle from LoadOptions -func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) { - if o.CustomCABundle == nil { - return nil, false, nil - } - - return o.CustomCABundle, true, nil -} - -// WithCustomCABundle is a helper function to construct functional options -// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle -// to nil will result in custom CA Bundle value being ignored. -// If multiple WithCustomCABundle calls are made, the last call overrides the -// previous call values. -func WithCustomCABundle(v io.Reader) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.CustomCABundle = v - return nil - } -} - -// UseEC2IMDSRegion provides a regionProvider that retrieves the region -// from the EC2 Metadata service. -type UseEC2IMDSRegion struct { - // If unset will default to generic EC2 IMDS client. - Client *imds.Client -} - -// getRegion attempts to retrieve the region from EC2 Metadata service. -func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) { - if ctx == nil { - ctx = context.Background() - } - - client := p.Client - if client == nil { - client = imds.New(imds.Options{}) - } - - result, err := client.GetRegion(ctx, nil) - if err != nil { - return "", false, err - } - if len(result.Region) != 0 { - return result.Region, true, nil - } - return "", false, nil -} - -// getEC2IMDSRegion returns the value of EC2 IMDS region. -func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) { - if o.UseEC2IMDSRegion == nil { - return "", false, nil - } - - return o.UseEC2IMDSRegion.getRegion(ctx) -} - -// WithEC2IMDSRegion is a helper function to construct functional options -// that enables resolving EC2IMDS region. The function takes -// in a UseEC2IMDSRegion functional option, and can be used to set the -// EC2IMDS client which will be used to resolve EC2IMDSRegion. -// If no functional option is provided, an EC2IMDS client is built and used -// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last -// call overrides the previous call values. Note that the WithRegion calls takes -// precedence over WithEC2IMDSRegion when resolving region. -func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseEC2IMDSRegion = &UseEC2IMDSRegion{} - - for _, fn := range fnOpts { - fn(o.UseEC2IMDSRegion) - } - return nil - } -} - -// getCredentialsProvider returns the credentials value -func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) { - if o.Credentials == nil { - return nil, false, nil - } - - return o.Credentials, true, nil -} - -// WithCredentialsProvider is a helper function to construct functional options -// that sets Credential provider value on config's LoadOptions. If credentials -// provider is set to nil, the credentials provider value will be ignored. -// If multiple WithCredentialsProvider calls are made, the last call overrides -// the previous call values. -func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Credentials = v - return nil - } -} - -// getCredentialsCacheOptionsProvider returns the wrapped function to set aws.CredentialsCacheOptions -func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) { - if o.CredentialsCacheOptions == nil { - return nil, false, nil - } - - return o.CredentialsCacheOptions, true, nil -} - -// WithCredentialsCacheOptions is a helper function to construct functional -// options that sets a function to modify the aws.CredentialsCacheOptions the -// aws.CredentialsCache will be configured with, if the CredentialsCache is used -// by the configuration loader. -// -// If multiple WithCredentialsCacheOptions calls are made, the last call -// overrides the previous call values. -func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.CredentialsCacheOptions = v - return nil - } -} - -// getBearerAuthTokenProvider returns the credentials value -func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) { - if o.BearerAuthTokenProvider == nil { - return nil, false, nil - } - - return o.BearerAuthTokenProvider, true, nil -} - -// WithBearerAuthTokenProvider is a helper function to construct functional options -// that sets Credential provider value on config's LoadOptions. If credentials -// provider is set to nil, the credentials provider value will be ignored. -// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides -// the previous call values. -func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.BearerAuthTokenProvider = v - return nil - } -} - -// getBearerAuthTokenCacheOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions -func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) { - if o.BearerAuthTokenCacheOptions == nil { - return nil, false, nil - } - - return o.BearerAuthTokenCacheOptions, true, nil -} - -// WithBearerAuthTokenCacheOptions is a helper function to construct functional options -// that sets a function to modify the TokenCacheOptions the smithy-go -// auth/bearer#TokenCache will be configured with, if the TokenCache is used by -// the configuration loader. -// -// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides -// the previous call values. -func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.BearerAuthTokenCacheOptions = v - return nil - } -} - -// getSSOTokenProviderOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions -func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) { - if o.SSOTokenProviderOptions == nil { - return nil, false, nil - } - - return o.SSOTokenProviderOptions, true, nil -} - -// WithSSOTokenProviderOptions is a helper function to construct functional -// options that sets a function to modify the SSOtokenProviderOptions the SDK's -// credentials/ssocreds#SSOProvider will be configured with, if the -// SSOTokenProvider is used by the configuration loader. -// -// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides -// the previous call values. -func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SSOTokenProviderOptions = v - return nil - } -} - -// getProcessCredentialOptions returns the wrapped function to set processcreds.Options -func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) { - if o.ProcessCredentialOptions == nil { - return nil, false, nil - } - - return o.ProcessCredentialOptions, true, nil -} - -// WithProcessCredentialOptions is a helper function to construct functional options -// that sets a function to use processcreds.Options on config's LoadOptions. -// If process credential options is set to nil, the process credential value will -// be ignored. If multiple WithProcessCredentialOptions calls are made, the last call -// overrides the previous call values. -func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ProcessCredentialOptions = v - return nil - } -} - -// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options -func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) { - if o.EC2RoleCredentialOptions == nil { - return nil, false, nil - } - - return o.EC2RoleCredentialOptions, true, nil -} - -// WithEC2RoleCredentialOptions is a helper function to construct functional options -// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If -// EC2 role credential options is set to nil, the EC2 role credential options value -// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made, -// the last call overrides the previous call values. -func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2RoleCredentialOptions = v - return nil - } -} - -// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options -func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) { - if o.EndpointCredentialOptions == nil { - return nil, false, nil - } - - return o.EndpointCredentialOptions, true, nil -} - -// WithEndpointCredentialOptions is a helper function to construct functional options -// that sets a function to use endpointcreds.Options on config's LoadOptions. If -// endpoint credential options is set to nil, the endpoint credential options -// value will be ignored. If multiple WithEndpointCredentialOptions calls are made, -// the last call overrides the previous call values. -func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointCredentialOptions = v - return nil - } -} - -// getWebIdentityRoleCredentialOptions returns the wrapped function -func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { - if o.WebIdentityRoleCredentialOptions == nil { - return nil, false, nil - } - - return o.WebIdentityRoleCredentialOptions, true, nil -} - -// WithWebIdentityRoleCredentialOptions is a helper function to construct -// functional options that sets a function to use stscreds.WebIdentityRoleOptions -// on config's LoadOptions. If web identity role credentials options is set to nil, -// the web identity role credentials value will be ignored. If multiple -// WithWebIdentityRoleCredentialOptions calls are made, the last call -// overrides the previous call values. -func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.WebIdentityRoleCredentialOptions = v - return nil - } -} - -// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions -func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) { - if o.AssumeRoleCredentialOptions == nil { - return nil, false, nil - } - - return o.AssumeRoleCredentialOptions, true, nil -} - -// WithAssumeRoleCredentialOptions is a helper function to construct -// functional options that sets a function to use stscreds.AssumeRoleOptions -// on config's LoadOptions. If assume role credentials options is set to nil, -// the assume role credentials value will be ignored. If multiple -// WithAssumeRoleCredentialOptions calls are made, the last call overrides -// the previous call values. -func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.AssumeRoleCredentialOptions = v - return nil - } -} - -func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) { - if o.HTTPClient == nil { - return nil, false, nil - } - - return o.HTTPClient, true, nil -} - -// WithHTTPClient is a helper function to construct functional options -// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil, -// the HTTPClient value will be ignored. -// If multiple WithHTTPClient calls are made, the last call overrides -// the previous call values. -func WithHTTPClient(v HTTPClient) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.HTTPClient = v - return nil - } -} - -func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) { - if o.APIOptions == nil { - return nil, false, nil - } - - return o.APIOptions, true, nil -} - -// WithAPIOptions is a helper function to construct functional options -// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the -// APIOptions value is ignored. If multiple WithAPIOptions calls are -// made, the last call overrides the previous call values. -func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc { - return func(o *LoadOptions) error { - if v == nil { - return nil - } - - o.APIOptions = append(o.APIOptions, v...) - return nil - } -} - -func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if o.RetryMaxAttempts == 0 { - return 0, false, nil - } - - return o.RetryMaxAttempts, true, nil -} - -// WithRetryMaxAttempts is a helper function to construct functional options that sets -// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is -// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides -// the previous call values. -// -// Will be ignored of LoadOptions.Retryer or WithRetryer are used. -func WithRetryMaxAttempts(v int) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.RetryMaxAttempts = v - return nil - } -} - -func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if o.RetryMode == "" { - return "", false, nil - } - - return o.RetryMode, true, nil -} - -// WithRetryMode is a helper function to construct functional options that sets -// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is -// ignored. If multiple WithRetryMode calls are made, the last call overrides -// the previous call values. -// -// Will be ignored of LoadOptions.Retryer or WithRetryer are used. -func WithRetryMode(v aws.RetryMode) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.RetryMode = v - return nil - } -} - -func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) { - if o.Retryer == nil { - return nil, false, nil - } - - return o.Retryer, true, nil -} - -// WithRetryer is a helper function to construct functional options -// that sets Retryer on LoadOptions. If Retryer is set to nil, the -// Retryer value is ignored. If multiple WithRetryer calls are -// made, the last call overrides the previous call values. -func WithRetryer(v func() aws.Retryer) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Retryer = v - return nil - } -} - -func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) { - if o.EndpointResolver == nil { - return nil, false, nil - } - - return o.EndpointResolver, true, nil -} - -// WithEndpointResolver is a helper function to construct functional options -// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil, -// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls -// are made, the last call overrides the previous call values. -// -// Deprecated: The global endpoint resolution interface is deprecated. The API -// for endpoint resolution is now unique to each service and is set via the -// EndpointResolverV2 field on service client options. Use of -// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you -// from using any endpoint-related service features released after the -// introduction of EndpointResolverV2. You may also encounter broken or -// unexpected behavior when using the old global interface with services that -// use many endpoint-related customizations such as S3. -func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointResolver = v - return nil - } -} - -func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) { - if o.EndpointResolverWithOptions == nil { - return nil, false, nil - } - - return o.EndpointResolverWithOptions, true, nil -} - -// WithEndpointResolverWithOptions is a helper function to construct functional options -// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, -// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls -// are made, the last call overrides the previous call values. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [WithEndpointResolver]. -func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointResolverWithOptions = v - return nil - } -} - -func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) { - if o.Logger == nil { - return nil, false, nil - } - - return o.Logger, true, nil -} - -// WithLogger is a helper function to construct functional options -// that sets Logger on LoadOptions. If Logger is set to nil, the -// Logger value will be ignored. If multiple WithLogger calls are made, -// the last call overrides the previous call values. -func WithLogger(v logging.Logger) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Logger = v - return nil - } -} - -func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) { - if o.ClientLogMode == nil { - return 0, false, nil - } - - return *o.ClientLogMode, true, nil -} - -// WithClientLogMode is a helper function to construct functional options -// that sets client log mode on LoadOptions. If client log mode is set to nil, -// the client log mode value will be ignored. If multiple WithClientLogMode calls are made, -// the last call overrides the previous call values. -func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ClientLogMode = &v - return nil - } -} - -func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) { - if o.LogConfigurationWarnings == nil { - return false, false, nil - } - return *o.LogConfigurationWarnings, true, nil -} - -// WithLogConfigurationWarnings is a helper function to construct -// functional options that can be used to set LogConfigurationWarnings -// on LoadOptions. -// -// If multiple WithLogConfigurationWarnings calls are made, the last call -// overrides the previous call values. -func WithLogConfigurationWarnings(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.LogConfigurationWarnings = &v - return nil - } -} - -// GetS3UseARNRegion returns whether to allow ARNs to direct the region -// the S3 client's requests are sent to. -func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) { - if o.S3UseARNRegion == nil { - return false, false, nil - } - return *o.S3UseARNRegion, true, nil -} - -// WithS3UseARNRegion is a helper function to construct functional options -// that can be used to set S3UseARNRegion on LoadOptions. -// If multiple WithS3UseARNRegion calls are made, the last call overrides -// the previous call values. -func WithS3UseARNRegion(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3UseARNRegion = &v - return nil - } -} - -// GetS3DisableMultiRegionAccessPoints returns whether to disable -// the S3 multi-region access points feature. -func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) { - if o.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - return *o.S3DisableMultiRegionAccessPoints, true, nil -} - -// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options -// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions. -// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides -// the previous call values. -func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3DisableMultiRegionAccessPoints = &v - return nil - } -} - -// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set. -func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { - if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - return o.EnableEndpointDiscovery, true, nil -} - -// WithEndpointDiscovery is a helper function to construct functional options -// that can be used to enable endpoint discovery on LoadOptions for supported clients. -// If multiple WithEndpointDiscovery calls are made, the last call overrides -// the previous call values. -func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EnableEndpointDiscovery = v - return nil - } -} - -// getSSOProviderOptions returns AssumeRoleCredentialOptions from LoadOptions -func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) { - if o.SSOProviderOptions == nil { - return nil, false, nil - } - - return o.SSOProviderOptions, true, nil -} - -// WithSSOProviderOptions is a helper function to construct -// functional options that sets a function to use ssocreds.Options -// on config's LoadOptions. If the SSO credential provider options is set to nil, -// the sso provider options value will be ignored. If multiple -// WithSSOProviderOptions calls are made, the last call overrides -// the previous call values. -func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SSOProviderOptions = v - return nil - } -} - -// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. -func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { - if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { - return imds.ClientDefaultEnableState, false, nil - } - - return o.EC2IMDSClientEnableState, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return o.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) { - if len(o.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return o.EC2IMDSEndpoint, true, nil -} - -// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState. -func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSClientEnableState = v - return nil - } -} - -// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode. -func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSEndpointMode = v - return nil - } -} - -// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint. -func WithEC2IMDSEndpoint(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSEndpoint = v - return nil - } -} - -// WithUseDualStackEndpoint is a helper function to construct -// functional options that can be used to set UseDualStackEndpoint on LoadOptions. -func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseDualStackEndpoint = v - return nil - } -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. -func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - return o.UseDualStackEndpoint, true, nil -} - -// WithUseFIPSEndpoint is a helper function to construct -// functional options that can be used to set UseFIPSEndpoint on LoadOptions. -func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseFIPSEndpoint = v - return nil - } -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - return o.UseFIPSEndpoint, true, nil -} - -// WithDefaultsMode sets the SDK defaults configuration mode to the value provided. -// -// Zero or more functional options can be provided to provide configuration options for performing -// environment discovery when using aws.DefaultsModeAuto. -func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc { - do := DefaultsModeOptions{ - Mode: mode, - } - for _, fn := range optFns { - fn(&do) - } - return func(options *LoadOptions) error { - options.DefaultsModeOptions = do - return nil - } -} - -// GetS3DisableExpressAuth returns the configured value for -// [EnvConfig.S3DisableExpressAuth]. -func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) { - if o.S3DisableExpressAuth == nil { - return false, false - } - - return *o.S3DisableExpressAuth, true -} - -// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth] -// to the value provided. -func WithS3DisableExpressAuth(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3DisableExpressAuth = &v - return nil - } -} - -// WithBaseEndpoint is a helper function to construct functional options that -// sets BaseEndpoint on config's LoadOptions. Empty values have no effect, and -// subsequent calls to this API override previous ones. -// -// This is an in-code setting, therefore, any value set using this hook takes -// precedence over and will override ALL environment and shared config -// directives that set endpoint URLs. Functional options on service clients -// have higher specificity, and functional options that modify the value of -// BaseEndpoint on a client will take precedence over this setting. -func WithBaseEndpoint(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.BaseEndpoint = v - return nil - } -} - -// WithServiceOptions is a helper function to construct functional options -// that sets ServiceOptions on config's LoadOptions. -func WithServiceOptions(callbacks ...func(string, any)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ServiceOptions = append(o.ServiceOptions, callbacks...) - return nil - } -} - -// WithBeforeExecution adds the BeforeExecutionInterceptor to config. -func WithBeforeExecution(i smithyhttp.BeforeExecutionInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.BeforeExecution = append(o.Interceptors.BeforeExecution, i) - return nil - } -} - -// WithBeforeSerialization adds the BeforeSerializationInterceptor to config. -func WithBeforeSerialization(i smithyhttp.BeforeSerializationInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.BeforeSerialization = append(o.Interceptors.BeforeSerialization, i) - return nil - } -} - -// WithAfterSerialization adds the AfterSerializationInterceptor to config. -func WithAfterSerialization(i smithyhttp.AfterSerializationInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.AfterSerialization = append(o.Interceptors.AfterSerialization, i) - return nil - } -} - -// WithBeforeRetryLoop adds the BeforeRetryLoopInterceptor to config. -func WithBeforeRetryLoop(i smithyhttp.BeforeRetryLoopInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.BeforeRetryLoop = append(o.Interceptors.BeforeRetryLoop, i) - return nil - } -} - -// WithBeforeAttempt adds the BeforeAttemptInterceptor to config. -func WithBeforeAttempt(i smithyhttp.BeforeAttemptInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.BeforeAttempt = append(o.Interceptors.BeforeAttempt, i) - return nil - } -} - -// WithBeforeSigning adds the BeforeSigningInterceptor to config. -func WithBeforeSigning(i smithyhttp.BeforeSigningInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.BeforeSigning = append(o.Interceptors.BeforeSigning, i) - return nil - } -} - -// WithAfterSigning adds the AfterSigningInterceptor to config. -func WithAfterSigning(i smithyhttp.AfterSigningInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.AfterSigning = append(o.Interceptors.AfterSigning, i) - return nil - } -} - -// WithBeforeTransmit adds the BeforeTransmitInterceptor to config. -func WithBeforeTransmit(i smithyhttp.BeforeTransmitInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.BeforeTransmit = append(o.Interceptors.BeforeTransmit, i) - return nil - } -} - -// WithAfterTransmit adds the AfterTransmitInterceptor to config. -func WithAfterTransmit(i smithyhttp.AfterTransmitInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.AfterTransmit = append(o.Interceptors.AfterTransmit, i) - return nil - } -} - -// WithBeforeDeserialization adds the BeforeDeserializationInterceptor to config. -func WithBeforeDeserialization(i smithyhttp.BeforeDeserializationInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.BeforeDeserialization = append(o.Interceptors.BeforeDeserialization, i) - return nil - } -} - -// WithAfterDeserialization adds the AfterDeserializationInterceptor to config. -func WithAfterDeserialization(i smithyhttp.AfterDeserializationInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.AfterDeserialization = append(o.Interceptors.AfterDeserialization, i) - return nil - } -} - -// WithAfterAttempt adds the AfterAttemptInterceptor to config. -func WithAfterAttempt(i smithyhttp.AfterAttemptInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.AfterAttempt = append(o.Interceptors.AfterAttempt, i) - return nil - } -} - -// WithAfterExecution adds the AfterExecutionInterceptor to config. -func WithAfterExecution(i smithyhttp.AfterExecutionInterceptor) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Interceptors.AfterExecution = append(o.Interceptors.AfterExecution, i) - return nil - } -} - -// WithAuthSchemePreference sets the priority order of auth schemes on config. -// -// Schemes are expressed as names e.g. sigv4a or sigv4. -func WithAuthSchemePreference(schemeIDs ...string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.AuthSchemePreference = schemeIDs - return nil - } -} - -func (o LoadOptions) getAuthSchemePreference() ([]string, bool) { - if len(o.AuthSchemePreference) > 0 { - return o.AuthSchemePreference, true - } - return nil, false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go deleted file mode 100644 index b629137c8218..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go +++ /dev/null @@ -1,51 +0,0 @@ -package config - -import ( - "fmt" - "net" - "net/url" -) - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - if len(addrs) == 0 { - return false, fmt.Errorf("no addrs found for host, %s", host) - } - - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func validateLocalURL(v string) error { - u, err := url.Parse(v) - if err != nil { - return err - } - - host := u.Hostname() - if len(host) == 0 { - return fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if isLoopback, err := isLoopbackHost(host); err != nil { - return fmt.Errorf("failed to resolve host %q, %v", host, err) - } else if !isLoopback { - return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go deleted file mode 100644 index 18b9b5ad20e0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ /dev/null @@ -1,786 +0,0 @@ -package config - -import ( - "context" - "io" - "net/http" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// sharedConfigProfileProvider provides access to the shared config profile -// name external configuration value. -type sharedConfigProfileProvider interface { - getSharedConfigProfile(ctx context.Context) (string, bool, error) -} - -// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedConfigProfileProvider); ok { - value, found, err = p.getSharedConfigProfile(ctx) - if err != nil || found { - break - } - } - } - return -} - -// sharedConfigFilesProvider provides access to the shared config filesnames -// external configuration value. -type sharedConfigFilesProvider interface { - getSharedConfigFiles(ctx context.Context) ([]string, bool, error) -} - -// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedConfigFilesProvider); ok { - value, found, err = p.getSharedConfigFiles(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// sharedCredentialsFilesProvider provides access to the shared credentials filesnames -// external configuration value. -type sharedCredentialsFilesProvider interface { - getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) -} - -// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedCredentialsFilesProvider); ok { - value, found, err = p.getSharedCredentialsFiles(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// customCABundleProvider provides access to the custom CA bundle PEM bytes. -type customCABundleProvider interface { - getCustomCABundle(ctx context.Context) (io.Reader, bool, error) -} - -// getCustomCABundle searches the configs for a customCABundleProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(customCABundleProvider); ok { - value, found, err = p.getCustomCABundle(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// regionProvider provides access to the region external configuration value. -type regionProvider interface { - getRegion(ctx context.Context) (string, bool, error) -} - -// getRegion searches the configs for a regionProvider and returns the value -// if found. Returns an error if a provider fails before a value is found. -func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(regionProvider); ok { - value, found, err = p.getRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// IgnoreConfiguredEndpointsProvider is needed to search for all providers -// that provide a flag to disable configured endpoints. -type IgnoreConfiguredEndpointsProvider interface { - GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { - value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) - if err != nil || found { - break - } - } - } - return -} - -type baseEndpointProvider interface { - getBaseEndpoint(ctx context.Context) (string, bool, error) -} - -func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(baseEndpointProvider); ok { - value, found, err = p.getBaseEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} - -type servicesObjectProvider interface { - getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error) -} - -func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(servicesObjectProvider); ok { - value, found, err = p.getServicesObject(ctx) - if err != nil || found { - break - } - } - } - return -} - -// appIDProvider provides access to the sdk app ID value -type appIDProvider interface { - getAppID(ctx context.Context) (string, bool, error) -} - -func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(appIDProvider); ok { - value, found, err = p.getAppID(ctx) - if err != nil || found { - break - } - } - } - return -} - -// disableRequestCompressionProvider provides access to the DisableRequestCompression -type disableRequestCompressionProvider interface { - getDisableRequestCompression(context.Context) (bool, bool, error) -} - -func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(disableRequestCompressionProvider); ok { - value, found, err = p.getDisableRequestCompression(ctx) - if err != nil || found { - break - } - } - } - return -} - -// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes -type requestMinCompressSizeBytesProvider interface { - getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) -} - -func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok { - value, found, err = p.getRequestMinCompressSizeBytes(ctx) - if err != nil || found { - break - } - } - } - return -} - -// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode -type accountIDEndpointModeProvider interface { - getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) -} - -func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(accountIDEndpointModeProvider); ok { - value, found, err = p.getAccountIDEndpointMode(ctx) - if err != nil || found { - break - } - } - } - return -} - -// requestChecksumCalculationProvider provides access to the RequestChecksumCalculation -type requestChecksumCalculationProvider interface { - getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) -} - -func getRequestChecksumCalculation(ctx context.Context, configs configs) (value aws.RequestChecksumCalculation, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(requestChecksumCalculationProvider); ok { - value, found, err = p.getRequestChecksumCalculation(ctx) - if err != nil || found { - break - } - } - } - return -} - -// responseChecksumValidationProvider provides access to the ResponseChecksumValidation -type responseChecksumValidationProvider interface { - getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) -} - -func getResponseChecksumValidation(ctx context.Context, configs configs) (value aws.ResponseChecksumValidation, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(responseChecksumValidationProvider); ok { - value, found, err = p.getResponseChecksumValidation(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ec2IMDSRegionProvider provides access to the ec2 imds region -// configuration value -type ec2IMDSRegionProvider interface { - getEC2IMDSRegion(ctx context.Context) (string, bool, error) -} - -// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and -// returns the value if found. Returns an error if a provider fails before -// a value is found. -func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(ec2IMDSRegionProvider); ok { - region, found, err = provider.getEC2IMDSRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// credentialsProviderProvider provides access to the credentials external -// configuration value. -type credentialsProviderProvider interface { - getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) -} - -// getCredentialsProvider searches the configs for a credentialsProviderProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(credentialsProviderProvider); ok { - p, found, err = provider.getCredentialsProvider(ctx) - if err != nil || found { - break - } - } - } - return -} - -// credentialsCacheOptionsProvider is an interface for retrieving a function for setting -// the aws.CredentialsCacheOptions. -type credentialsCacheOptionsProvider interface { - getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) -} - -// getCredentialsCacheOptionsProvider is an interface for retrieving a function for setting -// the aws.CredentialsCacheOptions. -func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) ( - f func(*aws.CredentialsCacheOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(credentialsCacheOptionsProvider); ok { - f, found, err = p.getCredentialsCacheOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// bearerAuthTokenProviderProvider provides access to the bearer authentication -// token external configuration value. -type bearerAuthTokenProviderProvider interface { - getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error) -} - -// getBearerAuthTokenProvider searches the config sources for a -// bearerAuthTokenProviderProvider and returns the value if found. Returns an -// error if a provider fails before a value is found. -func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok { - p, found, err = provider.getBearerAuthTokenProvider(ctx) - if err != nil || found { - break - } - } - } - return -} - -// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for -// setting the smithy-go auth/bearer#TokenCacheOptions. -type bearerAuthTokenCacheOptionsProvider interface { - getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) -} - -// getBearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for -// setting the smithy-go auth/bearer#TokenCacheOptions. -func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) ( - f func(*smithybearer.TokenCacheOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok { - f, found, err = p.getBearerAuthTokenCacheOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoTokenProviderOptionsProvider is an interface for retrieving a function for -// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. -type ssoTokenProviderOptionsProvider interface { - getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) -} - -// getSSOTokenProviderOptions is an interface for retrieving a function for -// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. -func getSSOTokenProviderOptions(ctx context.Context, configs configs) ( - f func(*ssocreds.SSOTokenProviderOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(ssoTokenProviderOptionsProvider); ok { - f, found, err = p.getSSOTokenProviderOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoTokenProviderOptionsProvider - -// processCredentialOptions is an interface for retrieving a function for setting -// the processcreds.Options. -type processCredentialOptions interface { - getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) -} - -// getProcessCredentialOptions searches the slice of configs and returns the first function found -func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(processCredentialOptions); ok { - f, found, err = p.getProcessCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ec2RoleCredentialOptionsProvider is an interface for retrieving a function -// for setting the ec2rolecreds.Provider options. -type ec2RoleCredentialOptionsProvider interface { - getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) -} - -// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found -func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(ec2RoleCredentialOptionsProvider); ok { - f, found, err = p.getEC2RoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources -type defaultRegionProvider interface { - getDefaultRegion(ctx context.Context) (string, bool, error) -} - -// getDefaultRegion searches the slice of configs and returns the first fallback region found -func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, config := range configs { - if p, ok := config.(defaultRegionProvider); ok { - value, found, err = p.getDefaultRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointCredentialOptionsProvider is an interface for retrieving a function for setting -// the endpointcreds.ProviderOptions. -type endpointCredentialOptionsProvider interface { - getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error) -} - -// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found -func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(endpointCredentialOptionsProvider); ok { - f, found, err = p.getEndpointCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting -// the stscreds.WebIdentityRoleProvider. -type webIdentityRoleCredentialOptionsProvider interface { - getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) -} - -// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found -func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) { - for _, config := range configs { - if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok { - f, found, err = p.getWebIdentityRoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting -// the stscreds.AssumeRoleOptions. -type assumeRoleCredentialOptionsProvider interface { - getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error) -} - -// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found -func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) { - for _, config := range configs { - if p, ok := config.(assumeRoleCredentialOptionsProvider); ok { - f, found, err = p.getAssumeRoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// HTTPClient is an HTTP client implementation -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// httpClientProvider is an interface for retrieving HTTPClient -type httpClientProvider interface { - getHTTPClient(ctx context.Context) (HTTPClient, bool, error) -} - -// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs -func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) { - for _, config := range configs { - if p, ok := config.(httpClientProvider); ok { - client, found, err = p.getHTTPClient(ctx) - if err != nil || found { - break - } - } - } - return -} - -// apiOptionsProvider is an interface for retrieving APIOptions -type apiOptionsProvider interface { - getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) -} - -// getAPIOptions searches the slice of configs and returns the APIOptions set on configs -func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) { - for _, config := range configs { - if p, ok := config.(apiOptionsProvider); ok { - // retrieve APIOptions from configs and set it on cfg - apiOptions, found, err = p.getAPIOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source -type endpointResolverProvider interface { - getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) -} - -// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used -// to configure the aws.Config.EndpointResolver value. -func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) { - for _, c := range configs { - if p, ok := c.(endpointResolverProvider); ok { - f, found, err = p.getEndpointResolver(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source -type endpointResolverWithOptionsProvider interface { - getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) -} - -// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used -// to configure the aws.Config.EndpointResolver value. -func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) { - for _, c := range configs { - if p, ok := c.(endpointResolverWithOptionsProvider); ok { - f, found, err = p.getEndpointResolverWithOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// loggerProvider is an interface for retrieving a logging.Logger from a configuration source. -type loggerProvider interface { - getLogger(ctx context.Context) (logging.Logger, bool, error) -} - -// getLogger searches the provided config sources for a logging.Logger that can be used -// to configure the aws.Config.Logger value. -func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) { - for _, c := range configs { - if p, ok := c.(loggerProvider); ok { - l, found, err = p.getLogger(ctx) - if err != nil || found { - break - } - } - } - return -} - -// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source. -type clientLogModeProvider interface { - getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) -} - -func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(clientLogModeProvider); ok { - m, found, err = p.getClientLogMode(ctx) - if err != nil || found { - break - } - } - } - return -} - -// retryProvider is an configuration provider for custom Retryer. -type retryProvider interface { - getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) -} - -func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryProvider); ok { - v, found, err = p.getRetryer(ctx) - if err != nil || found { - break - } - } - } - return -} - -// logConfigurationWarningsProvider is an configuration provider for -// retrieving a boolean indicating whether configuration issues should -// be logged when loading from config sources -type logConfigurationWarningsProvider interface { - getLogConfigurationWarnings(ctx context.Context) (bool, bool, error) -} - -func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) { - for _, c := range configs { - if p, ok := c.(logConfigurationWarningsProvider); ok { - v, found, err = p.getLogConfigurationWarnings(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoCredentialOptionsProvider is an interface for retrieving a function for setting -// the ssocreds.Options. -type ssoCredentialOptionsProvider interface { - getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error) -} - -func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) { - for _, c := range configs { - if p, ok := c.(ssoCredentialOptionsProvider); ok { - v, found, err = p.getSSOProviderOptions(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type defaultsModeIMDSClientProvider interface { - getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error) -} - -func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) { - for _, c := range configs { - if p, ok := c.(defaultsModeIMDSClientProvider); ok { - v, found, err = p.getDefaultsModeIMDSClient(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type defaultsModeProvider interface { - getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error) -} - -func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(defaultsModeProvider); ok { - v, found, err = p.getDefaultsMode(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type retryMaxAttemptsProvider interface { - GetRetryMaxAttempts(context.Context) (int, bool, error) -} - -func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryMaxAttemptsProvider); ok { - v, found, err = p.GetRetryMaxAttempts(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type retryModeProvider interface { - GetRetryMode(context.Context) (aws.RetryMode, bool, error) -} - -func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryModeProvider); ok { - v, found, err = p.GetRetryMode(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -func getAuthSchemePreference(ctx context.Context, configs configs) ([]string, bool) { - type provider interface { - getAuthSchemePreference() ([]string, bool) - } - - for _, cfg := range configs { - if p, ok := cfg.(provider); ok { - if v, ok := p.getAuthSchemePreference(); ok { - return v, true - } - } - } - return nil, false -} - -type serviceOptionsProvider interface { - getServiceOptions(ctx context.Context) ([]func(string, any), bool, error) -} - -func getServiceOptions(ctx context.Context, configs configs) (v []func(string, any), found bool, err error) { - for _, c := range configs { - if p, ok := c.(serviceOptionsProvider); ok { - v, found, err = p.getServiceOptions(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go deleted file mode 100644 index 92a16d718df9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ /dev/null @@ -1,444 +0,0 @@ -package config - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/smithy-go/logging" -) - -// resolveDefaultAWSConfig will write default configuration values into the cfg -// value. It will write the default values, overwriting any previous value. -// -// This should be used as the first resolver in the slice of resolvers when -// resolving external configuration. -func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { - var sources []interface{} - for _, s := range cfgs { - sources = append(sources, s) - } - - *cfg = aws.Config{ - Logger: logging.NewStandardLogger(os.Stderr), - ConfigSources: sources, - } - return nil -} - -// resolveCustomCABundle extracts the first instance of a custom CA bundle filename -// from the external configurations. It will update the HTTP Client's builder -// to be configured with the custom CA bundle. -// -// Config provider used: -// * customCABundleProvider -func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error { - pemCerts, found, err := getCustomCABundle(ctx, cfgs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - if cfg.HTTPClient == nil { - cfg.HTTPClient = awshttp.NewBuildableClient() - } - - trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+ - "has no WithTransportOptions, %T", cfg.HTTPClient) - } - - var appendErr error - client := trOpts.WithTransportOptions(func(tr *http.Transport) { - if tr.TLSClientConfig == nil { - tr.TLSClientConfig = &tls.Config{} - } - if tr.TLSClientConfig.RootCAs == nil { - tr.TLSClientConfig.RootCAs = x509.NewCertPool() - } - - b, err := ioutil.ReadAll(pemCerts) - if err != nil { - appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") - } - - if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) { - appendErr = fmt.Errorf("failed to load custom CA bundle PEM file") - } - }) - if appendErr != nil { - return appendErr - } - - cfg.HTTPClient = client - return err -} - -// resolveRegion extracts the first instance of a Region from the configs slice. -// -// Config providers used: -// * regionProvider -func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - v, found, err := getRegion(ctx, configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - cfg.Region = v - return nil -} - -func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error { - var downcastCfgSources []interface{} - for _, cs := range configs { - downcastCfgSources = append(downcastCfgSources, interface{}(cs)) - } - - if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { - cfg.BaseEndpoint = nil - return nil - } - - v, found, err := getBaseEndpoint(ctx, configs) - if err != nil { - return err - } - - if !found { - return nil - } - cfg.BaseEndpoint = aws.String(v) - return nil -} - -// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var -func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { - ID, _, err := getAppID(ctx, configs) - if err != nil { - return err - } - - cfg.AppID = ID - return nil -} - -// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's -// SharedConfig or EnvConfig -func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error { - disable, _, err := getDisableRequestCompression(ctx, configs) - if err != nil { - return err - } - - cfg.DisableRequestCompression = disable - return nil -} - -// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's -// SharedConfig or EnvConfig -func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error { - minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs) - if err != nil { - return err - } - // must set a default min size 10240 if not configured - if !found { - minBytes = 10240 - } - cfg.RequestMinCompressSizeBytes = minBytes - return nil -} - -// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's -// SharedConfig or EnvConfig -func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error { - m, found, err := getAccountIDEndpointMode(ctx, configs) - if err != nil { - return err - } - - if !found { - m = aws.AccountIDEndpointModePreferred - } - - cfg.AccountIDEndpointMode = m - return nil -} - -// resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's -// SharedConfig or EnvConfig -func resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error { - c, found, err := getRequestChecksumCalculation(ctx, configs) - if err != nil { - return err - } - - if !found { - c = aws.RequestChecksumCalculationWhenSupported - } - cfg.RequestChecksumCalculation = c - return nil -} - -// resolveResponseValidation extracts the ResponseChecksumValidation from the configs slice's -// SharedConfig or EnvConfig -func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, configs configs) error { - c, found, err := getResponseChecksumValidation(ctx, configs) - if err != nil { - return err - } - - if !found { - c = aws.ResponseChecksumValidationWhenSupported - } - cfg.ResponseChecksumValidation = c - return nil -} - -// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default -// region if region had not been resolved from other sources. -func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - if len(cfg.Region) > 0 { - return nil - } - - v, found, err := getDefaultRegion(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Region = v - - return nil -} - -// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance -// if one has not been resolved from other sources. -func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error { - c, found, err := getHTTPClient(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.HTTPClient = c - return nil -} - -// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options -// if one has not been resolved from other sources. -func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - o, found, err := getAPIOptions(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.APIOptions = o - - return nil -} - -// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice -// and sets the functions result on the aws.Config.EndpointResolver -func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error { - endpointResolver, found, err := getEndpointResolver(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.EndpointResolver = endpointResolver - - return nil -} - -// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice -// and sets the functions result on the aws.Config.EndpointResolver -func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.EndpointResolverWithOptions = endpointResolver - - return nil -} - -func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error { - logger, found, err := getLogger(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Logger = logger - - return nil -} - -func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error { - mode, found, err := getClientLogMode(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.ClientLogMode = mode - - return nil -} - -func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error { - retryer, found, err := getRetryer(ctx, configs) - if err != nil { - return err - } - - if found { - cfg.Retryer = retryer - return nil - } - - // Only load the retry options if a custom retryer has not be specified. - if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil { - return err - } - return resolveRetryMode(ctx, cfg, configs) -} - -func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - if len(cfg.Region) > 0 { - return nil - } - - region, found, err := getEC2IMDSRegion(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Region = region - - return nil -} - -func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - defaultsMode, found, err := getDefaultsMode(ctx, configs) - if err != nil { - return err - } - if !found { - defaultsMode = aws.DefaultsModeLegacy - } - - var environment aws.RuntimeEnvironment - if defaultsMode == aws.DefaultsModeAuto { - envConfig, _, _ := getAWSConfigSources(configs) - - client, found, err := getDefaultsModeIMDSClient(ctx, configs) - if err != nil { - return err - } - if !found { - client = imds.NewFromConfig(*cfg) - } - - environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client) - if err != nil { - return err - } - } - - cfg.DefaultsMode = defaultsMode - cfg.RuntimeEnvironment = environment - - return nil -} - -func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error { - maxAttempts, found, err := getRetryMaxAttempts(ctx, configs) - if err != nil || !found { - return err - } - cfg.RetryMaxAttempts = maxAttempts - - return nil -} - -func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error { - retryMode, found, err := getRetryMode(ctx, configs) - if err != nil || !found { - return err - } - cfg.RetryMode = retryMode - - return nil -} - -func resolveInterceptors(ctx context.Context, cfg *aws.Config, configs configs) error { - // LoadOptions is the only thing that you can really configure interceptors - // on so just check that directly. - for _, c := range configs { - if loadopts, ok := c.(LoadOptions); ok { - cfg.Interceptors = loadopts.Interceptors.Copy() - } - } - return nil -} - -func resolveAuthSchemePreference(ctx context.Context, cfg *aws.Config, configs configs) error { - if pref, ok := getAuthSchemePreference(ctx, configs); ok { - cfg.AuthSchemePreference = pref - } - return nil -} - -func resolveServiceOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - serviceOptions, found, err := getServiceOptions(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.ServiceOptions = serviceOptions - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go deleted file mode 100644 index a8ebb3c0a390..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go +++ /dev/null @@ -1,122 +0,0 @@ -package config - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - smithybearer "github.com/aws/smithy-go/auth/bearer" -) - -// resolveBearerAuthToken extracts a token provider from the config sources. -// -// If an explicit bearer authentication token provider is not found the -// resolver will fallback to resolving token provider via other config sources -// such as SharedConfig. -func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error { - found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs) - if found || err != nil { - return err - } - - return resolveBearerAuthTokenProviderChain(ctx, cfg, configs) -} - -// resolveBearerAuthTokenProvider extracts the first instance of -// BearerAuthTokenProvider from the config sources. -// -// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure -// the Token is only refreshed when needed. This also protects the -// TokenProvider so it can be used concurrently. -// -// Config providers used: -// * bearerAuthTokenProviderProvider -func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { - tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs) - if !found || err != nil { - return false, err - } - - cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( - ctx, configs, tokenProvider) - if err != nil { - return false, err - } - - return true, nil -} - -func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { - _, sharedConfig, _ := getAWSConfigSources(configs) - - var provider smithybearer.TokenProvider - - if sharedConfig.SSOSession != nil { - provider, err = resolveBearerAuthSSOTokenProvider( - ctx, cfg, sharedConfig.SSOSession, configs) - } - - if err == nil && provider != nil { - cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( - ctx, configs, provider) - } - - return err -} - -func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) { - ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) - if err != nil { - return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) - } - - var optFns []func(*ssocreds.SSOTokenProviderOptions) - if found { - optFns = append(optFns, ssoTokenProviderOptionsFn) - } - - cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name) - if err != nil { - return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err) - } - - client := ssooidc.NewFromConfig(*cfg) - provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...) - - return provider, nil -} - -// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go -// bearer/auth#TokenCache with the provided options if the provider is not -// already a TokenCache. -func wrapWithBearerAuthTokenCache( - ctx context.Context, - cfgs configs, - provider smithybearer.TokenProvider, - optFns ...func(*smithybearer.TokenCacheOptions), -) (smithybearer.TokenProvider, error) { - _, ok := provider.(*smithybearer.TokenCache) - if ok { - return provider, nil - } - - tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs) - if err != nil { - return nil, err - } - - opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns)) - opts = append(opts, func(o *smithybearer.TokenCacheOptions) { - o.RefreshBeforeExpires = 5 * time.Minute - o.RetrieveBearerTokenTimeout = 30 * time.Second - }) - opts = append(opts, optFns...) - if optionsFound { - opts = append(opts, tokenCacheConfigOptions) - } - - return smithybearer.NewTokenCache(provider, opts...), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go deleted file mode 100644 index b00259df03aa..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ /dev/null @@ -1,627 +0,0 @@ -package config - -import ( - "context" - "fmt" - "io/ioutil" - "net" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/aws-sdk-go-v2/service/sso" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - "github.com/aws/aws-sdk-go-v2/service/sts" -) - -const ( - // valid credential source values - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" - httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" -) - -// direct representation of the IPv4 address for the ECS container -// "169.254.170.2" -var ecsContainerIPv4 net.IP = []byte{ - 169, 254, 170, 2, -} - -// direct representation of the IPv4 address for the EKS container -// "169.254.170.23" -var eksContainerIPv4 net.IP = []byte{ - 169, 254, 170, 23, -} - -// direct representation of the IPv6 address for the EKS container -// "fd00:ec2::23" -var eksContainerIPv6 net.IP = []byte{ - 0xFD, 0, 0xE, 0xC2, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0x23, -} - -var ( - ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing -) - -// resolveCredentials extracts a credential provider from slice of config -// sources. -// -// If an explicit credential provider is not found the resolver will fallback -// to resolving credentials by extracting a credential provider from EnvConfig -// and SharedConfig. -func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { - found, err := resolveCredentialProvider(ctx, cfg, configs) - if found || err != nil { - return err - } - - return resolveCredentialChain(ctx, cfg, configs) -} - -// resolveCredentialProvider extracts the first instance of Credentials from the -// config slices. -// -// The resolved CredentialProvider will be wrapped in a cache to ensure the -// credentials are only refreshed when needed. This also protects the -// credential provider to be used concurrently. -// -// Config providers used: -// * credentialsProviderProvider -func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { - credProvider, found, err := getCredentialsProvider(ctx, configs) - if !found || err != nil { - return false, err - } - - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider) - if err != nil { - return false, err - } - - return true, nil -} - -// resolveCredentialChain resolves a credential provider chain using EnvConfig -// and SharedConfig if present in the slice of provided configs. -// -// The resolved CredentialProvider will be wrapped in a cache to ensure the -// credentials are only refreshed when needed. This also protects the -// credential provider to be used concurrently. -func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { - envConfig, sharedConfig, other := getAWSConfigSources(configs) - - // When checking if a profile was specified programmatically we should only consider the "other" - // configuration sources that have been provided. This ensures we correctly honor the expected credential - // hierarchy. - _, sharedProfileSet, err := getSharedConfigProfile(ctx, other) - if err != nil { - return err - } - - switch { - case sharedProfileSet: - ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) - case envConfig.Credentials.HasKeys(): - ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVars) - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} - case len(envConfig.WebIdentityTokenFilePath) > 0: - ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVarsSTSWebIDToken) - err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs) - default: - ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) - } - if err != nil { - return err - } - - // Wrap the resolved provider in a cache so the SDK will cache credentials. - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials) - if err != nil { - return err - } - - return nil -} - -func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (ctx2 context.Context, err error) { - switch { - case sharedConfig.Source != nil: - ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSourceProfile) - // Assume IAM role with credentials source from a different profile. - ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) - - case sharedConfig.Credentials.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - ctx = addCredentialSource(ctx, aws.CredentialSourceProfile) - cfg.Credentials = credentials.StaticCredentialsProvider{ - Value: sharedConfig.Credentials, - Source: getCredentialSources(ctx), - } - - case len(sharedConfig.CredentialSource) != 0: - ctx = addCredentialSource(ctx, aws.CredentialSourceProfileNamedProvider) - ctx, err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) - - case len(sharedConfig.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. - ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSTSWebIDToken) - return ctx, assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) - - case sharedConfig.hasSSOConfiguration(): - if sharedConfig.hasLegacySSOConfiguration() { - ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSOLegacy) - ctx = addCredentialSource(ctx, aws.CredentialSourceSSOLegacy) - } else { - ctx = addCredentialSource(ctx, aws.CredentialSourceSSO) - } - if sharedConfig.SSOSession != nil { - ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSO) - } - err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs) - - case len(sharedConfig.CredentialProcess) != 0: - // Get credentials from CredentialProcess - ctx = addCredentialSource(ctx, aws.CredentialSourceProfileProcess) - ctx = addCredentialSource(ctx, aws.CredentialSourceProcess) - err = processCredentials(ctx, cfg, sharedConfig, configs) - - case len(envConfig.ContainerCredentialsRelativePath) != 0: - ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) - err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) - - case len(envConfig.ContainerCredentialsEndpoint) != 0: - ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) - err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) - - default: - ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) - err = resolveEC2RoleCredentials(ctx, cfg, configs) - } - if err != nil { - return ctx, err - } - - if len(sharedConfig.RoleARN) > 0 { - return ctx, credsFromAssumeRole(ctx, cfg, sharedConfig, configs) - } - - return ctx, nil -} - -func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { - if err := sharedConfig.validateSSOConfiguration(); err != nil { - return err - } - - var options []func(*ssocreds.Options) - v, found, err := getSSOProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - options = append(options, v) - } - - cfgCopy := cfg.Copy() - - options = append(options, func(o *ssocreds.Options) { - o.CredentialSources = getCredentialSources(ctx) - }) - - if sharedConfig.SSOSession != nil { - ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) - if err != nil { - return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) - } - var optFns []func(*ssocreds.SSOTokenProviderOptions) - if found { - optFns = append(optFns, ssoTokenProviderOptionsFn) - } - cfgCopy.Region = sharedConfig.SSOSession.SSORegion - cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name) - if err != nil { - return err - } - oidcClient := ssooidc.NewFromConfig(cfgCopy) - tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...) - options = append(options, func(o *ssocreds.Options) { - o.SSOTokenProvider = tokenProvider - o.CachedTokenFilepath = cachedPath - }) - } else { - cfgCopy.Region = sharedConfig.SSORegion - } - - cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...) - - return nil -} - -func ecsContainerURI(path string) string { - return fmt.Sprintf("%s%s", ecsContainerEndpoint, path) -} - -func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { - var opts []func(*processcreds.Options) - - options, found, err := getProcessCredentialOptions(ctx, configs) - if err != nil { - return err - } - if found { - opts = append(opts, options) - } - - opts = append(opts, func(o *processcreds.Options) { - o.CredentialSources = getCredentialSources(ctx) - }) - - cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) - - return nil -} - -// isAllowedHost allows host to be loopback or known ECS/EKS container IPs -// -// host can either be an IP address OR an unresolved hostname - resolution will -// be automatically performed in the latter case -func isAllowedHost(host string) (bool, error) { - if ip := net.ParseIP(host); ip != nil { - return isIPAllowed(ip), nil - } - - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - - for _, addr := range addrs { - if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { - return false, nil - } - } - - return true, nil -} - -func isIPAllowed(ip net.IP) bool { - return ip.IsLoopback() || - ip.Equal(ecsContainerIPv4) || - ip.Equal(eksContainerIPv4) || - ip.Equal(eksContainerIPv6) -} - -func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error { - var resolveErr error - - parsed, err := url.Parse(endpointURL) - if err != nil { - resolveErr = fmt.Errorf("invalid URL, %w", err) - } else { - host := parsed.Hostname() - if len(host) == 0 { - resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if parsed.Scheme == "http" { - if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { - resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr) - } else if !isAllowedHost { - resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host) - } - } - } - - if resolveErr != nil { - return resolveErr - } - - return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs) -} - -func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error { - optFns := []func(*endpointcreds.Options){ - func(options *endpointcreds.Options) { - if len(authToken) != 0 { - options.AuthorizationToken = authToken - } - if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { - options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { - var contents []byte - var err error - if contents, err = ioutil.ReadFile(authFilePath); err != nil { - return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) - } - return string(contents), nil - }) - } - options.APIOptions = cfg.APIOptions - if cfg.Retryer != nil { - options.Retryer = cfg.Retryer() - } - options.CredentialSources = getCredentialSources(ctx) - }, - } - - optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - provider := endpointcreds.New(url, optFns...) - - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) { - options.ExpiryWindow = 5 * time.Minute - }) - if err != nil { - return err - } - - return nil -} - -func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (context.Context, error) { - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) - return ctx, resolveEC2RoleCredentials(ctx, cfg, configs) - - case credSourceEnvironment: - ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} - - case credSourceECSContainer: - ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) - if len(envConfig.ContainerCredentialsRelativePath) != 0 { - return ctx, resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) - } - if len(envConfig.ContainerCredentialsEndpoint) != 0 { - return ctx, resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) - } - return ctx, fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set") - - default: - return ctx, fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") - } - - return ctx, nil -} - -func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { - optFns := make([]func(*ec2rolecreds.Options), 0, 2) - - optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - optFns = append(optFns, func(o *ec2rolecreds.Options) { - // Only define a client from config if not already defined. - if o.Client == nil { - o.Client = imds.NewFromConfig(*cfg) - } - o.CredentialSources = getCredentialSources(ctx) - }) - - provider := ec2rolecreds.New(optFns...) - - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider) - if err != nil { - return err - } - return nil -} - -func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) { - var ( - envConfig *EnvConfig - sharedConfig *SharedConfig - other configs - ) - - for i := range cfgs { - switch c := cfgs[i].(type) { - case EnvConfig: - if envConfig == nil { - envConfig = &c - } - case *EnvConfig: - if envConfig == nil { - envConfig = c - } - case SharedConfig: - if sharedConfig == nil { - sharedConfig = &c - } - case *SharedConfig: - if envConfig == nil { - sharedConfig = c - } - default: - other = append(other, c) - } - } - - if envConfig == nil { - envConfig = &EnvConfig{} - } - - if sharedConfig == nil { - sharedConfig = &SharedConfig{} - } - - return envConfig, sharedConfig, other -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Error is the error message -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error { - if len(filepath) == 0 { - return fmt.Errorf("token file path is not set") - } - - optFns := []func(*stscreds.WebIdentityRoleOptions){ - func(options *stscreds.WebIdentityRoleOptions) { - options.RoleSessionName = sessionName - }, - } - - optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - - if found { - optFns = append(optFns, optFn) - } - - opts := stscreds.WebIdentityRoleOptions{ - RoleARN: roleARN, - } - - optFns = append(optFns, func(options *stscreds.WebIdentityRoleOptions) { - options.CredentialSources = getCredentialSources(ctx) - }) - - for _, fn := range optFns { - fn(&opts) - } - - if len(opts.RoleARN) == 0 { - return fmt.Errorf("role ARN is not set") - } - - client := opts.Client - if client == nil { - client = sts.NewFromConfig(*cfg) - } - - provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) - - cfg.Credentials = provider - - return nil -} - -func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { - // resolve credentials early - credentialSources := getCredentialSources(ctx) - optFns := []func(*stscreds.AssumeRoleOptions){ - func(options *stscreds.AssumeRoleOptions) { - options.RoleSessionName = sharedCfg.RoleSessionName - if sharedCfg.RoleDurationSeconds != nil { - if *sharedCfg.RoleDurationSeconds/time.Minute > 15 { - options.Duration = *sharedCfg.RoleDurationSeconds - } - } - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - options.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) != 0 { - options.SerialNumber = aws.String(sharedCfg.MFASerial) - } - - // add existing credential chain - options.CredentialSources = credentialSources - }, - } - - optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - { - // Synthesize options early to validate configuration errors sooner to ensure a token provider - // is present if the SerialNumber was set. - var o stscreds.AssumeRoleOptions - for _, fn := range optFns { - fn(&o) - } - if o.TokenProvider == nil && o.SerialNumber != nil { - return AssumeRoleTokenProviderNotSetError{} - } - } - cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) - - return nil -} - -// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache -// with the provided options if the provider is not already a -// aws.CredentialsCache. -func wrapWithCredentialsCache( - ctx context.Context, - cfgs configs, - provider aws.CredentialsProvider, - optFns ...func(options *aws.CredentialsCacheOptions), -) (aws.CredentialsProvider, error) { - _, ok := provider.(*aws.CredentialsCache) - if ok { - return provider, nil - } - - credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs) - if err != nil { - return nil, err - } - - // force allocation of a new slice if the additional options are - // needed, to prevent overwriting the passed in slice of options. - optFns = optFns[:len(optFns):len(optFns)] - if optionsFound { - optFns = append(optFns, credCacheOptions) - } - - return aws.NewCredentialsCache(provider, optFns...), nil -} - -// credentialSource stores the chain of providers that was used to create an instance of -// a credentials provider on the context -type credentialSource struct{} - -func addCredentialSource(ctx context.Context, source aws.CredentialSource) context.Context { - existing, ok := ctx.Value(credentialSource{}).([]aws.CredentialSource) - if !ok { - existing = []aws.CredentialSource{source} - } else { - existing = append(existing, source) - } - return context.WithValue(ctx, credentialSource{}, existing) -} - -func getCredentialSources(ctx context.Context) []aws.CredentialSource { - return ctx.Value(credentialSource{}).([]aws.CredentialSource) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go deleted file mode 100644 index 97be3f75694e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ /dev/null @@ -1,1696 +0,0 @@ -package config - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/aws-sdk-go-v2/internal/ini" - "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" - "github.com/aws/smithy-go/logging" - smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" -) - -const ( - // Prefix to use for filtering profiles. The profile prefix should only - // exist in the shared config file, not the credentials file. - profilePrefix = `profile ` - - // Prefix to be used for SSO sections. These are supposed to only exist in - // the shared config file, not the credentials file. - ssoSectionPrefix = `sso-session ` - - // Prefix for services section. It is referenced in profile via the services - // parameter to configure clients for service-specific parameters. - servicesPrefix = `services ` - - // string equivalent for boolean - endpointDiscoveryDisabled = `false` - endpointDiscoveryEnabled = `true` - endpointDiscoveryAuto = `auto` - - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - roleDurationSecondsKey = "duration_seconds" // optional - - // AWS Single Sign-On (AWS SSO) group - ssoSessionNameKey = "sso_session" - - ssoRegionKey = "sso_region" - ssoStartURLKey = "sso_start_url" - - ssoAccountIDKey = "sso_account_id" - ssoRoleNameKey = "sso_role_name" - - // Additional Config fields - regionKey = `region` - - // endpoint discovery group - enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - - // External Credential process - credentialProcessKey = `credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" - - ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" - - ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" - - ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" - - // Use DualStack Endpoint Resolution - useDualStackEndpoint = "use_dualstack_endpoint" - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. - DefaultSharedConfigProfile = `default` - - // S3 Disable Multi-Region AccessPoints - s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points` - - useFIPSEndpointKey = "use_fips_endpoint" - - defaultsModeKey = "defaults_mode" - - // Retry options - retryMaxAttemptsKey = "max_attempts" - retryModeKey = "retry_mode" - - caBundleKey = "ca_bundle" - - sdkAppID = "sdk_ua_app_id" - - ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls" - - endpointURL = "endpoint_url" - - servicesSectionKey = "services" - - disableRequestCompression = "disable_request_compression" - requestMinCompressionSizeBytes = "request_min_compression_size_bytes" - - s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" - - accountIDKey = "aws_account_id" - accountIDEndpointMode = "account_id_endpoint_mode" - - requestChecksumCalculationKey = "request_checksum_calculation" - responseChecksumValidationKey = "response_checksum_validation" - checksumWhenSupported = "when_supported" - checksumWhenRequired = "when_required" - - authSchemePreferenceKey = "auth_scheme_preference" -) - -// defaultSharedConfigProfile allows for swapping the default profile for testing -var defaultSharedConfigProfile = DefaultSharedConfigProfile - -// DefaultSharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func DefaultSharedCredentialsFilename() string { - return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials") -} - -// DefaultSharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func DefaultSharedConfigFilename() string { - return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config") -} - -// DefaultSharedConfigFiles is a slice of the default shared config files that -// the will be used in order to load the SharedConfig. -var DefaultSharedConfigFiles = []string{ - DefaultSharedConfigFilename(), -} - -// DefaultSharedCredentialsFiles is a slice of the default shared credentials -// files that the will be used in order to load the SharedConfig. -var DefaultSharedCredentialsFiles = []string{ - DefaultSharedCredentialsFilename(), -} - -// SSOSession provides the shared configuration parameters of the sso-session -// section. -type SSOSession struct { - Name string - SSORegion string - SSOStartURL string -} - -func (s *SSOSession) setFromIniSection(section ini.Section) { - updateString(&s.Name, section, ssoSessionNameKey) - updateString(&s.SSORegion, section, ssoRegionKey) - updateString(&s.SSOStartURL, section, ssoStartURLKey) -} - -// Services contains values configured in the services section -// of the AWS configuration file. -type Services struct { - // Services section values - // {"serviceId": {"key": "value"}} - // e.g. {"s3": {"endpoint_url": "example.com"}} - ServiceValues map[string]map[string]string -} - -func (s *Services) setFromIniSection(section ini.Section) { - if s.ServiceValues == nil { - s.ServiceValues = make(map[string]map[string]string) - } - for _, service := range section.List() { - s.ServiceValues[service] = section.Map(service) - } -} - -// SharedConfig represents the configuration fields of the SDK config files. -type SharedConfig struct { - Profile string - - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. - // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Credentials aws.Credentials - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - // SSO session options - SSOSessionName string - SSOSession *SSOSession - - // Legacy SSO session options - SSORegion string - SSOStartURL string - - // SSO fields not used - SSOAccountID string - SSORoleName string - - RoleARN string - ExternalID string - MFASerial string - RoleSessionName string - RoleDurationSeconds *time.Duration - - SourceProfileName string - Source *SharedConfig - - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. - // - // region = us-west-2 - Region string - - // EnableEndpointDiscovery can be enabled or disabled in the shared config - // by setting endpoint_discovery_enabled to true, or false respectively. - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // s3_use_arn_region=true - S3UseARNRegion *bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection - // mode (IPv4 or IPv6) - // - // ec2_metadata_service_endpoint_mode=IPv6 - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If - // specified it overrides EC2IMDSEndpointMode. - // - // ec2_metadata_service_endpoint=http://fd00:ec2::254 - EC2IMDSEndpoint string - - // Specifies that IMDS clients should not fallback to IMDSv1 if token - // requests fail. - // - // ec2_metadata_v1_disabled=true - EC2IMDSv1Disabled *bool - - // Specifies if the S3 service should disable support for Multi-Region - // access-points - // - // s3_disable_multiregion_access_points=true - S3DisableMultiRegionAccessPoints *bool - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // use_dualstack_endpoint=true - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // use_fips_endpoint=true - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies which defaults mode should be used by services. - // - // defaults_mode=standard - DefaultsMode aws.DefaultsMode - - // Specifies the maximum number attempts an API client will call an - // operation that fails with a retryable error. - // - // max_attempts=3 - RetryMaxAttempts int - - // Specifies the retry model the API client will be created with. - // - // retry_mode=standard - RetryMode aws.RetryMode - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. Only use - // this if you want to configure the SDK to use a custom set of CAs. - // - // Enabling this option will attempt to merge the Transport into the SDK's - // HTTP client. If the client's Transport is not a http.Transport an error - // will be returned. If the Transport's TLS config is set this option will - // cause the SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this - // setting. To use this option and custom HTTP client, the HTTP client - // needs to be provided when creating the config. Not the service client. - // - // ca_bundle=$HOME/my_custom_ca_bundle - CustomCABundle string - - // aws sdk app ID that can be added to user agent header string - AppID string - - // Flag used to disable configured endpoints. - IgnoreConfiguredEndpoints *bool - - // Value to contain configured endpoints to be propagated to - // corresponding endpoint resolution field. - BaseEndpoint string - - // Services section config. - ServicesSectionName string - Services Services - - // determine if request compression is allowed, default to false - // retrieved from config file's profile field disable_request_compression - DisableRequestCompression *bool - - // inclusive threshold request body size to trigger compression, - // default to 10240 and must be within 0 and 10485760 bytes inclusive - // retrieved from config file's profile field request_min_compression_size_bytes - RequestMinCompressSizeBytes *int64 - - // Whether S3Express auth is disabled. - // - // This will NOT prevent requests from being made to S3Express buckets, it - // will only bypass the modified endpoint routing and signing behaviors - // associated with the feature. - S3DisableExpressAuth *bool - - AccountIDEndpointMode aws.AccountIDEndpointMode - - // RequestChecksumCalculation indicates if the request checksum should be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // ResponseChecksumValidation indicates if the response checksum should be validated - ResponseChecksumValidation aws.ResponseChecksumValidation - - // Priority list of preferred auth scheme names (e.g. sigv4a). - AuthSchemePreference []string -} - -func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { - if len(c.DefaultsMode) == 0 { - return "", false, nil - } - - return c.DefaultsMode, true, nil -} - -// GetRetryMaxAttempts returns the maximum number of attempts an API client -// created Retryer should attempt an operation call before failing. -func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) { - if c.RetryMaxAttempts == 0 { - return 0, false, nil - } - - return c.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the model the API client should create its Retryer in. -func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) { - if len(c.RetryMode) == 0 { - return "", false, nil - } - - return c.RetryMode, true, nil -} - -// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region -// the client's requests are sent to. -func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { - if c.S3UseARNRegion == nil { - return false, false, nil - } - - return *c.S3UseARNRegion, true, nil -} - -// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set. -func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { - if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - - return c.EnableEndpointDiscovery, true, nil -} - -// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region -// access-points. -func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { - if c.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - - return *c.S3DisableMultiRegionAccessPoints, true, nil -} - -// GetRegion returns the region for the profile if a region is set. -func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) { - if len(c.Region) == 0 { - return "", false, nil - } - return c.Region, true, nil -} - -// GetCredentialsProvider returns the credentials for a profile if they were set. -func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) { - return c.Credentials, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return c.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) { - if len(c.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return c.EC2IMDSEndpoint, true, nil -} - -// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option -// resolver interface. -func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { - if c.EC2IMDSv1Disabled == nil { - return false, false - } - - return *c.EC2IMDSv1Disabled, true -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. -func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - - return c.UseDualStackEndpoint, true, nil -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - - return c.UseFIPSEndpoint, true, nil -} - -// GetS3DisableExpressAuth returns the configured value for -// [SharedConfig.S3DisableExpressAuth]. -func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) { - if c.S3DisableExpressAuth == nil { - return false, false - } - - return *c.S3DisableExpressAuth, true -} - -// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was -func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { - if len(c.CustomCABundle) == 0 { - return nil, false, nil - } - - b, err := ioutil.ReadFile(c.CustomCABundle) - if err != nil { - return nil, false, err - } - return bytes.NewReader(b), true, nil -} - -// getAppID returns the sdk app ID if set in shared config profile -func (c SharedConfig) getAppID(context.Context) (string, bool, error) { - return c.AppID, len(c.AppID) > 0, nil -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { - if c.IgnoreConfiguredEndpoints == nil { - return false, false, nil - } - - return *c.IgnoreConfiguredEndpoints, true, nil -} - -func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) { - return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil -} - -// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use -// with configured endpoints. -func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { - if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok { - if endpt, ok := service[endpointURL]; ok { - return endpt, true, nil - } - } - return "", false, nil -} - -func normalizeShared(sdkID string) string { - lower := strings.ToLower(sdkID) - return strings.ReplaceAll(lower, " ", "_") -} - -func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) { - return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil -} - -// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the -// addition of ignoring when none of the files exist or when the profile -// is not found in any of the files. -func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) { - cfg, err := loadSharedConfig(ctx, configs) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistError); ok { - return SharedConfig{}, nil - } - return nil, err - } - - return cfg, nil -} - -// loadSharedConfig uses the configs passed in to load the SharedConfig from file -// The file names and profile name are sourced from the configs. -// -// If profile name is not provided DefaultSharedConfigProfile (default) will -// be used. -// -// If shared config filenames are not provided DefaultSharedConfigFiles will -// be used. -// -// Config providers used: -// * sharedConfigProfileProvider -// * sharedConfigFilesProvider -func loadSharedConfig(ctx context.Context, configs configs) (Config, error) { - var profile string - var configFiles []string - var credentialsFiles []string - var ok bool - var err error - - profile, ok, err = getSharedConfigProfile(ctx, configs) - if err != nil { - return nil, err - } - if !ok { - profile = defaultSharedConfigProfile - } - - configFiles, ok, err = getSharedConfigFiles(ctx, configs) - if err != nil { - return nil, err - } - - credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs) - if err != nil { - return nil, err - } - - // setup logger if log configuration warning is seti - var logger logging.Logger - logWarnings, found, err := getLogConfigurationWarnings(ctx, configs) - if err != nil { - return SharedConfig{}, err - } - if found && logWarnings { - logger, found, err = getLogger(ctx, configs) - if err != nil { - return SharedConfig{}, err - } - if !found { - logger = logging.NewStandardLogger(os.Stderr) - } - } - - return LoadSharedConfigProfile(ctx, profile, - func(o *LoadSharedConfigOptions) { - o.Logger = logger - o.ConfigFiles = configFiles - o.CredentialsFiles = credentialsFiles - }, - ) -} - -// LoadSharedConfigOptions struct contains optional values that can be used to load the config. -type LoadSharedConfigOptions struct { - - // CredentialsFiles are the shared credentials files - CredentialsFiles []string - - // ConfigFiles are the shared config files - ConfigFiles []string - - // Logger is the logger used to log shared config behavior - Logger logging.Logger -} - -// LoadSharedConfigProfile retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. -// -// If config files are not set, SDK will default to using a file at location `.aws/config` if present. -// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present. -// No default files are set, if files set to an empty slice. -// -// You can read more about shared config and credentials file location at -// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location -func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { - var option LoadSharedConfigOptions - for _, fn := range optFns { - fn(&option) - } - - if option.ConfigFiles == nil { - option.ConfigFiles = DefaultSharedConfigFiles - } - - if option.CredentialsFiles == nil { - option.CredentialsFiles = DefaultSharedCredentialsFiles - } - - // load shared configuration sections from shared configuration INI options - configSections, err := loadIniFiles(option.ConfigFiles) - if err != nil { - return SharedConfig{}, err - } - - // check for profile prefix and drop duplicates or invalid profiles - err = processConfigSections(ctx, &configSections, option.Logger) - if err != nil { - return SharedConfig{}, err - } - - // load shared credentials sections from shared credentials INI options - credentialsSections, err := loadIniFiles(option.CredentialsFiles) - if err != nil { - return SharedConfig{}, err - } - - // check for profile prefix and drop duplicates or invalid profiles - err = processCredentialsSections(ctx, &credentialsSections, option.Logger) - if err != nil { - return SharedConfig{}, err - } - - err = mergeSections(&configSections, credentialsSections) - if err != nil { - return SharedConfig{}, err - } - - cfg := SharedConfig{} - profiles := map[string]struct{}{} - - if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { - return SharedConfig{}, err - } - - return cfg, nil -} - -func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { - skipSections := map[string]struct{}{} - - for _, section := range sections.List() { - if _, ok := skipSections[section]; ok { - continue - } - - // drop sections from config file that do not have expected prefixes. - switch { - case strings.HasPrefix(section, profilePrefix): - // Rename sections to remove "profile " prefixing to match with - // credentials file. If default is already present, it will be - // dropped. - newName, err := renameProfileSection(section, sections, logger) - if err != nil { - return fmt.Errorf("failed to rename profile section, %w", err) - } - skipSections[newName] = struct{}{} - - case strings.HasPrefix(section, ssoSectionPrefix): - case strings.HasPrefix(section, servicesPrefix): - case strings.EqualFold(section, "default"): - default: - // drop this section, as invalid profile name - sections.DeleteSection(section) - - if logger != nil { - logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+ - "For use within a shared configuration file, "+ - "a non-default profile must have `profile ` "+ - "prefixed to the profile name.", - section, - ) - } - } - } - return nil -} - -func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { - v, ok := sections.GetSection(section) - if !ok { - return "", fmt.Errorf("error processing profiles within the shared configuration files") - } - - // delete section with profile as prefix - sections.DeleteSection(section) - - // set the value to non-prefixed name in sections. - section = strings.TrimPrefix(section, profilePrefix) - if sections.HasSection(section) { - oldSection, _ := sections.GetSection(section) - v.Logs = append(v.Logs, - fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+ - "overriding non-default profile from %s", - v.SourceFile, oldSection.SourceFile)) - sections.DeleteSection(section) - } - - // assign non-prefixed name to section - v.Name = section - sections.SetSection(section, v) - - return section, nil -} - -func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { - for _, section := range sections.List() { - // drop profiles with prefix for credential files - if strings.HasPrefix(section, profilePrefix) { - // drop this section, as invalid profile name - sections.DeleteSection(section) - - if logger != nil { - logger.Logf(logging.Debug, - "The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+ - "for the shared credentials file.\n", - section, - ) - } - } - } - return nil -} - -func loadIniFiles(filenames []string) (ini.Sections, error) { - mergedSections := ini.NewSections() - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - var v *ini.UnableToReadFile - if ok := errors.As(err, &v); ok { - // Skip files which can't be opened and read for whatever reason. - // We treat such files as empty, and do not fall back to other locations. - continue - } else if err != nil { - return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} - } - - // mergeSections into mergedSections - err = mergeSections(&mergedSections, sections) - if err != nil { - return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} - } - } - - return mergedSections, nil -} - -// mergeSections merges source section properties into destination section properties -func mergeSections(dst *ini.Sections, src ini.Sections) error { - for _, sectionName := range src.List() { - srcSection, _ := src.GetSection(sectionName) - - if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) || - (srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) { - srcSection.Errors = append(srcSection.Errors, - fmt.Errorf("partial credentials found for profile %v", sectionName)) - } - - if !dst.HasSection(sectionName) { - dst.SetSection(sectionName, srcSection) - continue - } - - // merge with destination srcSection - dstSection, _ := dst.GetSection(sectionName) - - // errors should be overriden if any - dstSection.Errors = srcSection.Errors - - // Access key id update - if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) { - accessKey := srcSection.String(accessKeyIDKey) - secretKey := srcSection.String(secretAccessKey) - - if dstSection.Has(accessKeyIDKey) { - dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey, - dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey])) - } - - // update access key - v, err := ini.NewStringValue(accessKey) - if err != nil { - return fmt.Errorf("error merging access key, %w", err) - } - dstSection.UpdateValue(accessKeyIDKey, v) - - // update secret key - v, err = ini.NewStringValue(secretKey) - if err != nil { - return fmt.Errorf("error merging secret key, %w", err) - } - dstSection.UpdateValue(secretAccessKey, v) - - // update session token - if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil { - return err - } - - // update source file to reflect where the static creds came from - dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey]) - dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey]) - } - - stringKeys := []string{ - roleArnKey, - sourceProfileKey, - credentialSourceKey, - externalIDKey, - mfaSerialKey, - roleSessionNameKey, - regionKey, - enableEndpointDiscoveryKey, - credentialProcessKey, - webIdentityTokenFileKey, - s3UseARNRegionKey, - s3DisableMultiRegionAccessPointsKey, - ec2MetadataServiceEndpointModeKey, - ec2MetadataServiceEndpointKey, - ec2MetadataV1DisabledKey, - useDualStackEndpoint, - useFIPSEndpointKey, - defaultsModeKey, - retryModeKey, - caBundleKey, - roleDurationSecondsKey, - retryMaxAttemptsKey, - - ssoSessionNameKey, - ssoAccountIDKey, - ssoRegionKey, - ssoRoleNameKey, - ssoStartURLKey, - - authSchemePreferenceKey, - } - for i := range stringKeys { - if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil { - return err - } - } - - // set srcSection on dst srcSection - *dst = dst.SetSection(sectionName, dstSection) - } - - return nil -} - -func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error { - if srcSection.Has(key) { - srcValue := srcSection.String(key) - val, err := ini.NewStringValue(srcValue) - if err != nil { - return fmt.Errorf("error merging %s, %w", key, err) - } - - if dstSection.Has(key) { - dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key, - dstSection.SourceFile[key], srcSection.SourceFile[key])) - } - - dstSection.UpdateValue(key, val) - dstSection.UpdateSourceFile(key, srcSection.SourceFile[key]) - } - return nil -} - -func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string { - return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+ - "with a %v value found in a duplicate profile defined at file %v. \n", - sectionName, key, dstSourceFile, key, srcSourceFile) -} - -// Returns an error if all of the files fail to load. If at least one file is -// successfully loaded and contains the profile, no error will be returned. -func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string, - sections ini.Sections, logger logging.Logger) error { - c.Profile = profile - - section, ok := sections.GetSection(profile) - if !ok { - return SharedConfigProfileNotExistError{ - Profile: profile, - } - } - - // if logs are appended to the section, log them - if section.Logs != nil && logger != nil { - for _, log := range section.Logs { - logger.Logf(logging.Debug, log) - } - } - - // set config from the provided INI section - err := c.setFromIniSection(profile, section) - if err != nil { - return fmt.Errorf("error fetching config from profile, %v, %w", profile, err) - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. - c.clearAssumeRoleOptions() - } else { - // First time a profile has been seen. Assert if the credential type - // requires a role ARN, the ARN is also set - if err := c.validateCredentialsConfig(profile); err != nil { - return err - } - } - - // if not top level profile and has credentials, return with credentials. - if len(profiles) != 0 && c.Credentials.HasKeys() { - return nil - } - - profiles[profile] = struct{}{} - - // validate no colliding credentials type are present - if err := c.validateCredentialType(); err != nil { - return err - } - - // Link source profiles for assume roles - if len(c.SourceProfileName) != 0 { - // Linked profile via source_profile ignore credential provider - // options, the source profile must provide the credentials. - c.clearCredentialOptions() - - srcCfg := &SharedConfig{} - err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger) - if err != nil { - // SourceProfileName that doesn't exist is an error in configuration. - if _, ok := err.(SharedConfigProfileNotExistError); ok { - err = SharedConfigAssumeRoleError{ - RoleARN: c.RoleARN, - Profile: c.SourceProfileName, - Err: err, - } - } - return err - } - - if !srcCfg.hasCredentials() { - return SharedConfigAssumeRoleError{ - RoleARN: c.RoleARN, - Profile: c.SourceProfileName, - } - } - - c.Source = srcCfg - } - - // If the profile contains an SSO session parameter, the session MUST exist - // as a section in the config file. Load the SSO session using the name - // provided. If the session section is not found or incomplete an error - // will be returned. - if c.hasSSOTokenProviderConfiguration() { - section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName)) - if !ok { - return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName) - } - var ssoSession SSOSession - ssoSession.setFromIniSection(section) - ssoSession.Name = c.SSOSessionName - c.SSOSession = &ssoSession - } - - if len(c.ServicesSectionName) > 0 { - if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok { - var svcs Services - svcs.setFromIniSection(section) - c.Services = svcs - } - } - - return nil -} - -// setFromIniSection loads the configuration from the profile section defined in -// the provided INI file. A SharedConfig pointer type value is used so that -// multiple config file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error { - if len(section.Name) == 0 { - sources := make([]string, 0) - for _, v := range section.SourceFile { - sources = append(sources, v) - } - - return fmt.Errorf("parsing error : could not find profile section name after processing files: %v", sources) - } - - if len(section.Errors) != 0 { - var errStatement string - for i, e := range section.Errors { - errStatement = fmt.Sprintf("%d, %v\n", i+1, e.Error()) - } - return fmt.Errorf("Error using profile: \n %v", errStatement) - } - - // Assume Role - updateString(&c.RoleARN, section, roleArnKey) - updateString(&c.ExternalID, section, externalIDKey) - updateString(&c.MFASerial, section, mfaSerialKey) - updateString(&c.RoleSessionName, section, roleSessionNameKey) - updateString(&c.SourceProfileName, section, sourceProfileKey) - updateString(&c.CredentialSource, section, credentialSourceKey) - updateString(&c.Region, section, regionKey) - - // AWS Single Sign-On (AWS SSO) - // SSO session options - updateString(&c.SSOSessionName, section, ssoSessionNameKey) - - // Legacy SSO session options - updateString(&c.SSORegion, section, ssoRegionKey) - updateString(&c.SSOStartURL, section, ssoStartURLKey) - - // SSO fields not used - updateString(&c.SSOAccountID, section, ssoAccountIDKey) - updateString(&c.SSORoleName, section, ssoRoleNameKey) - - // we're retaining a behavioral quirk with this field that existed before - // the removal of literal parsing for #2276: - // - if the key is missing, the config field will not be set - // - if the key is set to a non-numeric, the config field will be set to 0 - if section.Has(roleDurationSecondsKey) { - if v, ok := section.Int(roleDurationSecondsKey); ok { - c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second) - } else { - c.RoleDurationSeconds = aws.Duration(time.Duration(0)) - } - } - - updateString(&c.CredentialProcess, section, credentialProcessKey) - updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey) - updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey) - updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey) - - if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err) - } - updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) - updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) - - updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint) - updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey) - - if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err) - } - - if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err) - } - if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err) - } - - updateString(&c.CustomCABundle, section, caBundleKey) - - // user agent app ID added to request User-Agent header - updateString(&c.AppID, section, sdkAppID) - - updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints) - - updateString(&c.BaseEndpoint, section, endpointURL) - - if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err) - } - if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) - } - - if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) - } - - if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err) - } - if err := updateResponseChecksumValidation(&c.ResponseChecksumValidation, section, responseChecksumValidationKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err) - } - - // Shared Credentials - creds := aws.Credentials{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), - AccountID: section.String(accountIDKey), - } - - if creds.HasKeys() { - c.Credentials = creds - } - - updateString(&c.ServicesSectionName, section, servicesSectionKey) - - c.AuthSchemePreference = toAuthSchemePreferenceList(section.String(authSchemePreferenceKey)) - - return nil -} - -func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v, ok := sec.Int(key) - if !ok { - return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key)) - } - if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes { - return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v) - } - *bytes = new(int64) - **bytes = v - return nil -} - -func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch { - case v == "true": - *disable = new(bool) - **disable = true - case v == "false": - *disable = new(bool) - **disable = false - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v) - } - return nil -} - -func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch v { - case "preferred": - *m = aws.AccountIDEndpointModePreferred - case "required": - *m = aws.AccountIDEndpointModeRequired - case "disabled": - *m = aws.AccountIDEndpointModeDisabled - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v) - } - - return nil -} - -func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch strings.ToLower(v) { - case checksumWhenSupported: - *m = aws.RequestChecksumCalculationWhenSupported - case checksumWhenRequired: - *m = aws.RequestChecksumCalculationWhenRequired - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) - } - - return nil -} - -func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch strings.ToLower(v) { - case checksumWhenSupported: - *m = aws.ResponseChecksumValidationWhenSupported - case checksumWhenRequired: - *m = aws.ResponseChecksumValidationWhenRequired - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) - } - - return nil -} - -func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { - if c.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *c.RequestMinCompressSizeBytes, true, nil -} - -func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { - if c.DisableRequestCompression == nil { - return false, false, nil - } - return *c.DisableRequestCompression, true, nil -} - -func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { - return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil -} - -func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { - return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil -} - -func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { - return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil -} - -func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - if ok := mode.SetFromString(value); !ok { - return fmt.Errorf("invalid value: %s", value) - } - return nil -} - -func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) { - if !section.Has(key) { - return nil - } - value := section.String(key) - if *mode, err = aws.ParseRetryMode(value); err != nil { - return err - } - return nil -} - -func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - return endpointMode.SetFromString(value) -} - -func (c *SharedConfig) validateCredentialsConfig(profile string) error { - if err := c.validateCredentialsRequireARN(profile); err != nil { - return err - } - - return nil -} - -func (c *SharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(c.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(c.CredentialSource) != 0: - credSource = credentialSourceKey - case len(c.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(c.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (c *SharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. - if !oneOrNone( - len(c.SourceProfileName) != 0, - len(c.CredentialSource) != 0, - len(c.CredentialProcess) != 0, - len(c.WebIdentityTokenFile) != 0, - ) { - return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token") - } - - return nil -} - -func (c *SharedConfig) validateSSOConfiguration() error { - if c.hasSSOTokenProviderConfiguration() { - err := c.validateSSOTokenProviderConfiguration() - if err != nil { - return err - } - return nil - } - - if c.hasLegacySSOConfiguration() { - err := c.validateLegacySSOConfiguration() - if err != nil { - return err - } - } - return nil -} - -func (c *SharedConfig) validateSSOTokenProviderConfiguration() error { - var missing []string - - if len(c.SSOSessionName) == 0 { - missing = append(missing, ssoSessionNameKey) - } - - if c.SSOSession == nil { - missing = append(missing, ssoSectionPrefix) - } else { - if len(c.SSOSession.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(c.SSOSession.SSOStartURL) == 0 { - missing = append(missing, ssoStartURLKey) - } - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - c.Profile, strings.Join(missing, ", ")) - } - - if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { - return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) - } - - if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { - return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix) - } - - return nil -} - -func (c *SharedConfig) validateLegacySSOConfiguration() error { - var missing []string - - if len(c.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(c.SSOStartURL) == 0 { - missing = append(missing, ssoStartURLKey) - } - - if len(c.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(c.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - c.Profile, strings.Join(missing, ", ")) - } - return nil -} - -func (c *SharedConfig) hasCredentials() bool { - switch { - case len(c.SourceProfileName) != 0: - case len(c.CredentialSource) != 0: - case len(c.CredentialProcess) != 0: - case len(c.WebIdentityTokenFile) != 0: - case c.hasSSOConfiguration(): - case c.Credentials.HasKeys(): - default: - return false - } - - return true -} - -func (c *SharedConfig) hasSSOConfiguration() bool { - return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration() -} - -func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool { - return len(c.SSOSessionName) > 0 -} - -func (c *SharedConfig) hasLegacySSOConfiguration() bool { - return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 -} - -func (c *SharedConfig) clearAssumeRoleOptions() { - c.RoleARN = "" - c.ExternalID = "" - c.MFASerial = "" - c.RoleSessionName = "" - c.SourceProfileName = "" -} - -func (c *SharedConfig) clearCredentialOptions() { - c.CredentialSource = "" - c.CredentialProcess = "" - c.WebIdentityTokenFile = "" - c.Credentials = aws.Credentials{} - c.SSOAccountID = "" - c.SSORegion = "" - c.SSORoleName = "" - c.SSOStartURL = "" -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigLoadError) Unwrap() error { - return e.Err -} - -func (e SharedConfigLoadError) Error() string { - return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err) -} - -// SharedConfigProfileNotExistError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistError struct { - Filename []string - Profile string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigProfileNotExistError) Unwrap() error { - return e.Err -} - -func (e SharedConfigProfileNotExistError) Error() string { - return fmt.Sprintf("failed to get shared config profile, %s", e.Profile) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - Profile string - RoleARN string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) Unwrap() error { - return e.Err -} - -func (e SharedConfigAssumeRoleError) Error() string { - return fmt.Sprintf("failed to load assume role %s, of profile %s, %v", - e.RoleARN, e.Profile, e.Err) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Error satisfies the error interface. -func (e CredentialRequiresARNError) Error() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateInt will only update the dst with the value in the section key, key -// is present in the section. -// -// Down casts the INI integer value from a int64 to an int, which could be -// different bit size depending on platform. -func updateInt(dst *int, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - - v, ok := section.Int(key) - if !ok { - return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key)) - } - - *dst = int(v) - return nil -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - v, _ := section.Bool(key) - *dst = v -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - v, _ := section.Bool(key) - *dst = new(bool) - **dst = v -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - value := section.String(key) - if len(value) == 0 { - return - } - - switch { - case strings.EqualFold(value, endpointDiscoveryDisabled): - *dst = aws.EndpointDiscoveryDisabled - case strings.EqualFold(value, endpointDiscoveryEnabled): - *dst = aws.EndpointDiscoveryEnabled - case strings.EqualFold(value, endpointDiscoveryAuto): - *dst = aws.EndpointDiscoveryAuto - } -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - if v, _ := section.Bool(key); v { - *dst = aws.DualStackEndpointStateEnabled - } else { - *dst = aws.DualStackEndpointStateDisabled - } - - return -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - if v, _ := section.Bool(key); v { - *dst = aws.FIPSEndpointStateEnabled - } else { - *dst = aws.FIPSEndpointStateDisabled - } - - return -} - -func (c SharedConfig) getAuthSchemePreference() ([]string, bool) { - if len(c.AuthSchemePreference) > 0 { - return c.AuthSchemePreference, true - } - return nil, false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md deleted file mode 100644 index 6a3ab82df511..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ /dev/null @@ -1,806 +0,0 @@ -# v1.18.7 (2025-08-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2025-08-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.4 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.2 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2025-07-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.71 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.70 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.69 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.68 (2025-06-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.67 (2025-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.66 (2025-04-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.65 (2025-03-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.64 (2025-03-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.63 (2025-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.62 (2025-03-04.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.61 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.60 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.59 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.58 (2025-02-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.57 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.56 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.55 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.17.54 (2025-01-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.53 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.52 (2025-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.51 (2025-01-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.50 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.49 (2025-01-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.48 (2024-12-19) - -* **Bug Fix**: Fix improper use of printf-style functions. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.47 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.46 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.45 (2024-11-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.44 (2024-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.43 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.42 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.41 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.40 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.39 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.38 (2024-10-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.37 (2024-09-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.36 (2024-09-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.35 (2024-09-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.34 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.33 (2024-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.32 (2024-09-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.31 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.30 (2024-08-26) - -* **Bug Fix**: Save SSO cached token expiry in UTC to ensure cross-SDK compatibility. - -# v1.17.29 (2024-08-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.28 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.27 (2024-07-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.26 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.25 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.24 (2024-07-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.23 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.22 (2024-06-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.21 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.20 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.19 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.18 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.17 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.16 (2024-05-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.15 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.14 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.13 (2024-05-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.12 (2024-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2024-04-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2024-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.14 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2023-12-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2023-11-21) - -* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider. - -# v1.16.3 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2023-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-11-14) - -* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. - -# v1.15.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-11-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.43 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.42 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.41 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.40 (2023-09-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.39 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.38 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.37 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.36 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.31 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.30 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.29 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.28 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.27 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.26 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.25 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.24 (2023-05-09) - -* No change notes available for this release. - -# v1.13.23 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.22 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.21 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.20 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.19 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.18 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.17 (2023-03-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.16 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.15 (2023-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.14 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2023-02-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2023-02-01) - -* No change notes available for this release. - -# v1.13.10 (2023-01-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-01-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2022-12-15) - -* **Bug Fix**: Unify logic between shared config and in finding home directory -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-11-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-11-11) - -* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 -* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider - -# v1.12.24 (2022-11-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.23 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.22 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.21 (2022-09-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.20 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.19 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.18 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.17 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.16 (2022-08-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.15 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2022-08-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.9 (2022-07-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-05-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-04-25) - -* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-23) - -* **Feature**: Update `ec2rolecreds` package's `Provider` to implememnt support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-02-24) - -* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575) -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.5 (2021-12-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.4 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.3 (2021-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.2 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-11-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-09-10) - -* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. - -# v1.4.0 (2021-08-27) - -* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723 -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go deleted file mode 100644 index f6e2873ab906..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package credentials provides types for retrieving credentials from credentials sources. -*/ -package credentials diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go deleted file mode 100644 index 6ed71b42b283..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package ec2rolecreds provides the credentials provider implementation for -// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS. -// -// # Concurrency and caching -// -// The Provider is not safe to be used concurrently, and does not provide any -// caching of credentials retrieved. You should wrap the Provider with a -// `aws.CredentialsCache` to provide concurrency safety, and caching of -// credentials. -// -// # Loading credentials with the SDK's AWS Config -// -// The EC2 Instance role credentials provider will automatically be the resolved -// credential provider in the credential chain if no other credential provider is -// resolved first. -// -// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance -// role for credentials, you specify a `credentials_source` property in the config -// profile the SDK will load. -// -// [default] -// credential_source = Ec2InstanceMetadata -// -// # Loading credentials with the Provider directly -// -// Another way to use the EC2 Instance role credentials provider is to create it -// directly and assign it as the credentials provider for an API client. -// -// The following example creates a credentials provider for a command, and wraps -// it with the CredentialsCache before assigning the provider to the Amazon S3 API -// client's Credentials option. -// -// provider := imds.New(imds.Options{}) -// -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) -// -// If you need more control, you can set the configuration options on the -// credentials provider using the imds.Options type to configure the EC2 IMDS -// API Client and ExpiryWindow of the retrieved credentials. -// -// provider := imds.New(imds.Options{ -// // See imds.Options type's documentation for more options available. -// Client: imds.New(Options{ -// HTTPClient: customHTTPClient, -// }), -// -// // Modify how soon credentials expire prior to their original expiry time. -// ExpiryWindow: 5 * time.Minute, -// }) -// -// # EC2 IMDS API Client -// -// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on -// configuring the client, and options available. -package ec2rolecreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go deleted file mode 100644 index a95e6c8bdd6e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go +++ /dev/null @@ -1,241 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "math" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the -// GetMetadata operation. -type GetMetadataAPIClient interface { - GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error) -} - -// A Provider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// The New function must be used to create the with a custom EC2 IMDS client. -// -// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{ -// o.Client = imds.New(imds.Options{/* custom options */}) -// }) -type Provider struct { - options Options -} - -// Options is a list of user settable options for setting the behavior of the Provider. -type Options struct { - // The API client that will be used by the provider to make GetMetadata API - // calls to EC2 IMDS. - // - // If nil, the provider will default to the EC2 IMDS client. - Client GetMetadataAPIClient - - // The chain of providers that was used to create this provider - // These values are for reporting purposes and are not meant to be set up directly - CredentialSources []aws.CredentialSource -} - -// New returns an initialized Provider value configured to retrieve -// credentials from EC2 Instance Metadata service. -func New(optFns ...func(*Options)) *Provider { - options := Options{} - - for _, fn := range optFns { - fn(&options) - } - - if options.Client == nil { - options.Client = imds.New(imds.Options{}) - } - - return &Provider{ - options: options, - } -} - -// Retrieve retrieves credentials from the EC2 service. Error will be returned -// if the request fails, or unable to extract the desired credentials. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - credsList, err := requestCredList(ctx, p.options.Client) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - if len(credsList) == 0 { - return aws.Credentials{Source: ProviderName}, - fmt.Errorf("unexpected empty EC2 IMDS role list") - } - credsName := credsList[0] - - roleCreds, err := requestCred(ctx, p.options.Client, credsName) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - creds := aws.Credentials{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - Source: ProviderName, - - CanExpire: true, - Expires: roleCreds.Expiration, - } - - // Cap role credentials Expires to 1 hour so they can be refreshed more - // often. Jitter will be applied credentials cache if being used. - if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) { - creds.Expires = anHour - } - - return creds, nil -} - -// HandleFailToRefresh will extend the credentials Expires time if it it is -// expired. If the credentials will not expire within the minimum time, they -// will be returned. -// -// If the credentials cannot expire, the original error will be returned. -func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) ( - aws.Credentials, error, -) { - if !prevCreds.CanExpire { - return aws.Credentials{}, err - } - - if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) { - return prevCreds, nil - } - - newCreds := prevCreds - randFloat64, err := sdkrand.CryptoRandFloat64() - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err) - } - - // Random distribution of [5,15) minutes. - expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute - newCreds.Expires = sdk.NowTime().Add(expireOffset) - - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes())) - - return newCreds, nil -} - -// AdjustExpiresBy will adds the passed in duration to the passed in -// credential's Expires time, unless the time until Expires is less than 15 -// minutes. Returns the credentials, even if not updated. -func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) ( - aws.Credentials, error, -) { - if !creds.CanExpire { - return creds, nil - } - if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) { - return creds, nil - } - - creds.Expires = creds.Expires.Add(dur) - return creds, nil -} - -// ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "/iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. If -// there are no credentials, or there is an error making or receiving the -// request -func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) { - resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ - Path: iamSecurityCredsPath, - }) - if err != nil { - return nil, fmt.Errorf("no EC2 IMDS role found, %w", err) - } - defer resp.Content.Close() - - credsList := []string{} - s := bufio.NewScanner(resp.Content) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. -func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ - Path: path.Join(iamSecurityCredsPath, credsName), - }) - if err != nil { - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", - credsName, err) - } - defer resp.Content.Close() - - var respCreds ec2RoleCredRespBody - if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w", - credsName, err) - } - - if !strings.EqualFold(respCreds.Code, "Success") { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", - credsName, - &smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message}) - } - - return respCreds, nil -} - -// ProviderSources returns the credential chain that was used to construct this provider -func (p *Provider) ProviderSources() []aws.CredentialSource { - if p.options.CredentialSources == nil { - return []aws.CredentialSource{aws.CredentialSourceIMDS} - } // If no source has been set, assume this is used directly which means just call to assume role - return p.options.CredentialSources -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go deleted file mode 100644 index c3f5dadcec9a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go deleted file mode 100644 index dc291c97cd7e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go +++ /dev/null @@ -1,165 +0,0 @@ -package client - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/smithy-go" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ServiceID is the client identifer -const ServiceID = "endpoint-credentials" - -// HTTPClient is a client for sending HTTP requests -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Options is the endpoint client configurable options -type Options struct { - // The endpoint to retrieve credentials from - Endpoint string - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - Retryer aws.Retryer - - // Set of options to modify how the credentials operation is invoked. - APIOptions []func(*smithymiddleware.Stack) error -} - -// Copy creates a copy of the API options. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - return to -} - -// Client is an client for retrieving AWS credentials from an endpoint -type Client struct { - options Options -} - -// New constructs a new Client from the given options -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - if options.HTTPClient == nil { - options.HTTPClient = awshttp.NewBuildableClient() - } - - if options.Retryer == nil { - // Amazon-owned implementations of this endpoint are known to sometimes - // return plaintext responses (i.e. no Code) like normal, add a few - // additional status codes - options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) { - o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{ - Codes: map[int]struct{}{ - http.StatusTooManyRequests: {}, - }, - }) - }) - } - - for _, fn := range optFns { - fn(&options) - } - - client := &Client{ - options: options, - } - - return client -} - -// GetCredentialsInput is the input to send with the endpoint service to receive credentials. -type GetCredentialsInput struct { - AuthorizationToken string -} - -// GetCredentials retrieves credentials from credential endpoint -func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) { - stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest) - options := c.options.Copy() - for _, fn := range optFns { - fn(&options) - } - - stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After) - stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After) - stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After) - addProtocolFinalizerMiddlewares(stack, options, "GetCredentials") - retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer}) - middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID) - smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) - smithyhttp.AddCloseResponseBodyMiddleware(stack) - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, err - } - } - - handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, _, err := handler.Handle(ctx, params) - if err != nil { - return nil, err - } - - return result.(*GetCredentialsOutput), err -} - -// GetCredentialsOutput is the response from the credential endpoint -type GetCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string - AccountID string -} - -// EndpointError is an error returned from the endpoint service -type EndpointError struct { - Code string `json:"code"` - Message string `json:"message"` - Fault smithy.ErrorFault `json:"-"` - statusCode int `json:"-"` -} - -// Error is the error mesage string -func (e *EndpointError) Error() string { - return fmt.Sprintf("%s: %s", e.Code, e.Message) -} - -// ErrorCode is the error code returned by the endpoint -func (e *EndpointError) ErrorCode() string { - return e.Code -} - -// ErrorMessage is the error message returned by the endpoint -func (e *EndpointError) ErrorMessage() string { - return e.Message -} - -// ErrorFault indicates error fault classification -func (e *EndpointError) ErrorFault() smithy.ErrorFault { - return e.Fault -} - -// HTTPStatusCode implements retry.HTTPStatusCode. -func (e *EndpointError) HTTPStatusCode() int { - return e.statusCode -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go deleted file mode 100644 index 748ee67244ef..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go deleted file mode 100644 index f2820d20eac7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/aws/smithy-go" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type buildEndpoint struct { - Endpoint string -} - -func (b *buildEndpoint) ID() string { - return "BuildEndpoint" -} - -func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) ( - out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport, %T", in.Request) - } - - if len(b.Endpoint) == 0 { - return out, metadata, fmt.Errorf("endpoint not provided") - } - - parsed, err := url.Parse(b.Endpoint) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err) - } - - request.URL = parsed - - return next.HandleBuild(ctx, in) -} - -type serializeOpGetCredential struct{} - -func (s *serializeOpGetCredential) ID() string { - return "OperationSerializer" -} - -func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) ( - out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request) - } - - params, ok := in.Parameters.(*GetCredentialsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters) - } - - const acceptHeader = "Accept" - request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json") - - if len(params.AuthorizationToken) > 0 { - const authHeader = "Authorization" - request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken) - } - - return next.HandleSerialize(ctx, in) -} - -type deserializeOpGetCredential struct{} - -func (d *deserializeOpGetCredential) ID() string { - return "OperationDeserializer" -} - -func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) ( - out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, deserializeError(response) - } - - var shape *GetCredentialsOutput - if err = json.NewDecoder(response.Body).Decode(&shape); err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)} - } - - out.Result = shape - return out, metadata, err -} - -func deserializeError(response *smithyhttp.Response) error { - // we could be talking to anything, json isn't guaranteed - // see https://github.com/aws/aws-sdk-go-v2/issues/2316 - if response.Header.Get("Content-Type") == "application/json" { - return deserializeJSONError(response) - } - - msg, err := io.ReadAll(response.Body) - if err != nil { - return &smithy.DeserializationError{ - Err: fmt.Errorf("read response, %w", err), - } - } - - return &EndpointError{ - // no sensible value for Code - Message: string(msg), - Fault: stof(response.StatusCode), - statusCode: response.StatusCode, - } -} - -func deserializeJSONError(response *smithyhttp.Response) error { - var errShape *EndpointError - if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil { - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode error message, %w", err), - } - } - - errShape.Fault = stof(response.StatusCode) - errShape.statusCode = response.StatusCode - return errShape -} - -// maps HTTP status code to smithy ErrorFault -func stof(code int) smithy.ErrorFault { - if code >= 500 { - return smithy.FaultServer - } - return smithy.FaultClient -} - -func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %w", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %w", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go deleted file mode 100644 index c8ac6d9ff18a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. -// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "context" - "fmt" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client" - "github.com/aws/smithy-go/middleware" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -type getCredentialsAPIClient interface { - GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error) -} - -// Provider satisfies the aws.CredentialsProvider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - // The AWS Client to make HTTP requests to the endpoint with. The endpoint - // the request will be made to is provided by the aws.Config's - // EndpointResolver. - client getCredentialsAPIClient - - options Options -} - -// HTTPClient is a client for sending HTTP requests -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Options is structure of configurable options for Provider -type Options struct { - // Endpoint to retrieve credentials from. Required - Endpoint string - - // HTTPClient to handle sending HTTP requests to the target endpoint. - HTTPClient HTTPClient - - // Set of options to modify how the credentials operation is invoked. - APIOptions []func(*middleware.Stack) error - - // The Retryer to be used for determining whether a failed requested should be retried - Retryer aws.Retryer - - // Optional authorization token value if set will be used as the value of - // the Authorization header of the endpoint credential request. - // - // When constructed from environment, the provider will use the value of - // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token - // - // Will be overridden if AuthorizationTokenProvider is configured - AuthorizationToken string - - // Optional auth provider func to dynamically load the auth token from a file - // everytime a credential is retrieved - // - // When constructed from environment, the provider will read and use the content - // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable - // as the auth token everytime credentials are retrieved - // - // Will override AuthorizationToken if configured - AuthorizationTokenProvider AuthTokenProvider - - // The chain of providers that was used to create this provider - // These values are for reporting purposes and are not meant to be set up directly - CredentialSources []aws.CredentialSource -} - -// AuthTokenProvider defines an interface to dynamically load a value to be passed -// for the Authorization header of a credentials request. -type AuthTokenProvider interface { - GetToken() (string, error) -} - -// TokenProviderFunc is a func type implementing AuthTokenProvider interface -// and enables customizing token provider behavior -type TokenProviderFunc func() (string, error) - -// GetToken func retrieves auth token according to TokenProviderFunc implementation -func (p TokenProviderFunc) GetToken() (string, error) { - return p() -} - -// New returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func New(endpoint string, optFns ...func(*Options)) *Provider { - o := Options{ - Endpoint: endpoint, - } - - for _, fn := range optFns { - fn(&o) - } - - p := &Provider{ - client: client.New(client.Options{ - HTTPClient: o.HTTPClient, - Endpoint: o.Endpoint, - APIOptions: o.APIOptions, - Retryer: o.Retryer, - }), - options: o, - } - - return p -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - resp, err := p.getCredentials(ctx) - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err) - } - - creds := aws.Credentials{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - Source: ProviderName, - AccountID: resp.AccountID, - } - - if resp.Expiration != nil { - creds.CanExpire = true - creds.Expires = *resp.Expiration - } - - return creds, nil -} - -func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) { - authToken, err := p.resolveAuthToken() - if err != nil { - return nil, fmt.Errorf("resolve auth token: %v", err) - } - - return p.client.GetCredentials(ctx, &client.GetCredentialsInput{ - AuthorizationToken: authToken, - }) -} - -func (p *Provider) resolveAuthToken() (string, error) { - authToken := p.options.AuthorizationToken - - var err error - if p.options.AuthorizationTokenProvider != nil { - authToken, err = p.options.AuthorizationTokenProvider.GetToken() - if err != nil { - return "", err - } - } - - if strings.ContainsAny(authToken, "\r\n") { - return "", fmt.Errorf("authorization token contains invalid newline sequence") - } - - return authToken, nil -} - -var _ aws.CredentialProviderSource = (*Provider)(nil) - -// ProviderSources returns the credential chain that was used to construct this provider -func (p *Provider) ProviderSources() []aws.CredentialSource { - if p.options.CredentialSources == nil { - return []aws.CredentialSource{aws.CredentialSourceHTTP} - } - return p.options.CredentialSources -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go deleted file mode 100644 index 9d3c95a4e8c1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package credentials - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go deleted file mode 100644 index a3137b8fa9ba..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package processcreds is a credentials provider to retrieve credentials from a -// external CLI invoked process. -// -// WARNING: The following describes a method of sourcing credentials from an external -// process. This can potentially be dangerous, so proceed with caution. Other -// credential providers should be preferred if at all possible. If using this -// option, you should make sure that the config file is as locked down as possible -// using security best practices for your operating system. -// -// # Concurrency and caching -// -// The Provider is not safe to be used concurrently, and does not provide any -// caching of credentials retrieved. You should wrap the Provider with a -// `aws.CredentialsCache` to provide concurrency safety, and caching of -// credentials. -// -// # Loading credentials with the SDKs AWS Config -// -// You can use credentials from a AWS shared config `credential_process` in a -// variety of ways. -// -// One way is to setup your shared config file, located in the default -// location, with the `credential_process` key and the command you want to be -// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. -// -// [default] -// credential_process = /command/to/call -// -// Loading configuration using external will use the credential process to -// retrieve credentials. NOTE: If there are credentials in the profile you are -// using, the credential process will not be used. -// -// // Initialize a session to load credentials. -// cfg, _ := config.LoadDefaultConfig(context.TODO()) -// -// // Create S3 service client to use the credentials. -// svc := s3.NewFromConfig(cfg) -// -// # Loading credentials with the Provider directly -// -// Another way to use the credentials process provider is by using the -// `NewProvider` constructor to create the provider and providing a it with a -// command to be executed to retrieve credentials. -// -// The following example creates a credentials provider for a command, and wraps -// it with the CredentialsCache before assigning the provider to the Amazon S3 API -// client's Credentials option. -// -// // Create credentials using the Provider. -// provider := processcreds.NewProvider("/path/to/command") -// -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) -// -// If you need more control, you can set any configurable options in the -// credentials using one or more option functions. -// -// provider := processcreds.NewProvider("/path/to/command", -// func(o *processcreds.Options) { -// // Override the provider's default timeout -// o.Timeout = 2 * time.Minute -// }) -// -// You can also use your own `exec.Cmd` value by satisfying a value that satisfies -// the `NewCommandBuilder` interface and use the `NewProviderCommand` constructor. -// -// // Create an exec.Cmd -// cmdBuilder := processcreds.NewCommandBuilderFunc( -// func(ctx context.Context) (*exec.Cmd, error) { -// cmd := exec.CommandContext(ctx, -// "customCLICommand", -// "-a", "argument", -// ) -// cmd.Env = []string{ -// "ENV_VAR_FOO=value", -// "ENV_VAR_BAR=other_value", -// } -// -// return cmd, nil -// }, -// ) -// -// // Create credentials using your exec.Cmd and custom timeout -// provider := processcreds.NewProviderCommand(cmdBuilder, -// func(opt *processcreds.Provider) { -// // optionally override the provider's default timeout -// opt.Timeout = 1 * time.Second -// }) -package processcreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go deleted file mode 100644 index dfc6b2548ed5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ /dev/null @@ -1,296 +0,0 @@ -package processcreds - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "os" - "os/exec" - "runtime" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. - ProviderName = `ProcessProvider` - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProviderError is an error indicating failure initializing or executing the -// process credentials provider -type ProviderError struct { - Err error -} - -// Error returns the error message. -func (e *ProviderError) Error() string { - return fmt.Sprintf("process provider error: %v", e.Err) -} - -// Unwrap returns the underlying error the provider error wraps. -func (e *ProviderError) Unwrap() error { - return e.Err -} - -// Provider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type Provider struct { - // Provides a constructor for exec.Cmd that are invoked by the provider for - // retrieving credentials. Use this to provide custom creation of exec.Cmd - // with things like environment variables, or other configuration. - // - // The provider defaults to the DefaultNewCommand function. - commandBuilder NewCommandBuilder - - options Options -} - -// Options is the configuration options for configuring the Provider. -type Options struct { - // Timeout limits the time a process can run. - Timeout time.Duration - // The chain of providers that was used to create this provider - // These values are for reporting purposes and are not meant to be set up directly - CredentialSources []aws.CredentialSource -} - -// NewCommandBuilder provides the interface for specifying how command will be -// created that the Provider will use to retrieve credentials with. -type NewCommandBuilder interface { - NewCommand(context.Context) (*exec.Cmd, error) -} - -// NewCommandBuilderFunc provides a wrapper type around a function pointer to -// satisfy the NewCommandBuilder interface. -type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error) - -// NewCommand calls the underlying function pointer the builder was initialized with. -func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) { - return fn(ctx) -} - -// DefaultNewCommandBuilder provides the default NewCommandBuilder -// implementation used by the provider. It takes a command and arguments to -// invoke. The command will also be initialized with the current process -// environment variables, stderr, and stdin pipes. -type DefaultNewCommandBuilder struct { - Args []string -} - -// NewCommand returns an initialized exec.Cmd with the builder's initialized -// Args. The command is also initialized current process environment variables, -// stderr, and stdin pipes. -func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) { - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(b.Args) == 0 { - return nil, &ProviderError{ - Err: fmt.Errorf("failed to prepare command: command must not be empty"), - } - } - - cmdArgs = append(cmdArgs, b.Args...) - cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...) - cmd.Env = os.Environ() - - cmd.Stderr = os.Stderr // display stderr on console for MFA - cmd.Stdin = os.Stdin // enable stdin for MFA - - return cmd, nil -} - -// NewProvider returns a pointer to a new Credentials object wrapping the -// Provider. -// -// The provider defaults to the DefaultNewCommandBuilder for creating command -// the Provider will use to retrieve credentials with. -func NewProvider(command string, options ...func(*Options)) *Provider { - var args []string - - // Ensure that the command arguments are not set if the provided command is - // empty. This will error out when the command is executed since no - // arguments are specified. - if len(command) > 0 { - args = []string{command} - } - - commanBuilder := DefaultNewCommandBuilder{ - Args: args, - } - return NewProviderCommand(commanBuilder, options...) -} - -// NewProviderCommand returns a pointer to a new Credentials object with the -// specified command, and default timeout duration. Use this to provide custom -// creation of exec.Cmd for options like environment variables, or other -// configuration. -func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider { - p := &Provider{ - commandBuilder: builder, - options: Options{ - Timeout: DefaultTimeout, - }, - } - - for _, option := range options { - option(&p.options) - } - - return p -} - -// A CredentialProcessResponse is the AWS credentials format that must be -// returned when executing an external credential_process. -type CredentialProcessResponse struct { - // As of this writing, the Version key must be set to 1. This might - // increment over time as the structure evolves. - Version int - - // The access key ID that identifies the temporary security credentials. - AccessKeyID string `json:"AccessKeyId"` - - // The secret access key that can be used to sign requests. - SecretAccessKey string - - // The token that users must pass to the service API to use the temporary credentials. - SessionToken string - - // The date on which the current credentials expire. - Expiration *time.Time - - // The ID of the account for credentials - AccountID string `json:"AccountId"` -} - -// Retrieve executes the credential process command and returns the -// credentials, or error if the command fails. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - out, err := p.executeCredentialProcess(ctx) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - // Serialize and validate response - resp := &CredentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err), - } - } - - if resp.Version != 1 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("wrong version in process output (not 1)"), - } - } - - if len(resp.AccessKeyID) == 0 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("missing AccessKeyId in process output"), - } - } - - if len(resp.SecretAccessKey) == 0 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("missing SecretAccessKey in process output"), - } - } - - creds := aws.Credentials{ - Source: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - AccountID: resp.AccountID, - } - - // Handle expiration - if resp.Expiration != nil { - creds.CanExpire = true - creds.Expires = *resp.Expiration - } - - return creds, nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) { - if p.options.Timeout >= 0 { - var cancelFunc func() - ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout) - defer cancelFunc() - } - - cmd, err := p.commandBuilder.NewCommand(ctx) - if err != nil { - return nil, err - } - - // get creds json on process's stdout - output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte))) - if cmd.Stdout != nil { - cmd.Stdout = io.MultiWriter(cmd.Stdout, output) - } else { - cmd.Stdout = output - } - - execCh := make(chan error, 1) - go executeCommand(cmd, execCh) - - select { - case execError := <-execCh: - if execError == nil { - break - } - select { - case <-ctx.Done(): - return output.Bytes(), &ProviderError{ - Err: fmt.Errorf("credential process timed out: %w", execError), - } - default: - return output.Bytes(), &ProviderError{ - Err: fmt.Errorf("error in credential_process: %w", execError), - } - } - } - - out := output.Bytes() - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`)) - } - - return out, nil -} - -// ProviderSources returns the credential chain that was used to construct this provider -func (p *Provider) ProviderSources() []aws.CredentialSource { - if p.options.CredentialSources == nil { - return []aws.CredentialSource{aws.CredentialSourceProcess} - } - return p.options.CredentialSources -} - -func executeCommand(cmd *exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go deleted file mode 100644 index ece1e65f73bf..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go +++ /dev/null @@ -1,81 +0,0 @@ -// Package ssocreds provides a credential provider for retrieving temporary AWS -// credentials using an SSO access token. -// -// IMPORTANT: The provider in this package does not initiate or perform the AWS -// SSO login flow. The SDK provider expects that you have already performed the -// SSO login flow using AWS CLI using the "aws sso login" command, or by some -// other mechanism. The provider must find a valid non-expired access token for -// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not -// found, it is expired, or the file is malformed an error will be returned. -// -// # Loading AWS SSO credentials with the AWS shared configuration file -// -// You can use configure AWS SSO credentials from the AWS shared configuration file by -// specifying the required keys in the profile and referencing an sso-session: -// -// sso_session -// sso_account_id -// sso_role_name -// -// For example, the following defines a profile "devsso" and specifies the AWS -// SSO parameters that defines the target account, role, sign-on portal, and -// the region where the user portal is located. Note: all SSO arguments must be -// provided, or an error will be returned. -// -// [profile devsso] -// sso_session = dev-session -// sso_role_name = SSOReadOnlyRole -// sso_account_id = 123456789012 -// -// [sso-session dev-session] -// sso_start_url = https://my-sso-portal.awsapps.com/start -// sso_region = us-east-1 -// sso_registration_scopes = sso:account:access -// -// Using the config module, you can load the AWS SDK shared configuration, and -// specify that this profile be used to retrieve credentials. For example: -// -// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso")) -// if err != nil { -// return err -// } -// -// # Programmatically loading AWS SSO credentials directly -// -// You can programmatically construct the AWS SSO Provider in your application, -// and provide the necessary information to load and retrieve temporary -// credentials using an access token from ~/.aws/sso/cache. -// -// ssoClient := sso.NewFromConfig(cfg) -// ssoOidcClient := ssooidc.NewFromConfig(cfg) -// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session") -// if err != nil { -// return err -// } -// -// var provider aws.CredentialsProvider -// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) { -// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath) -// }) -// -// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time -// provider = aws.NewCredentialsCache(provider) -// -// credentials, err := provider.Retrieve(context.TODO()) -// if err != nil { -// return err -// } -// -// It is important that you wrap the Provider with aws.CredentialsCache if you -// are programmatically constructing the provider directly. This prevents your -// application from accessing the cached access token and requesting new -// credentials each time the credentials are used. -// -// # Additional Resources -// -// Configuring the AWS CLI to use AWS Single Sign-On: -// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -// -// AWS Single Sign-On User Guide: -// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html -package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go deleted file mode 100644 index 46ae2f92310f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go +++ /dev/null @@ -1,233 +0,0 @@ -package ssocreds - -import ( - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" -) - -var osUserHomeDur = shareddefaults.UserHomeDir - -// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or -// error if unable get derive the path. Key that will be used to compute a SHA1 -// value that is hex encoded. -// -// Derives the filepath using the Key as: -// -// ~/.aws/sso/cache/.json -func StandardCachedTokenFilepath(key string) (string, error) { - homeDir := osUserHomeDur() - if len(homeDir) == 0 { - return "", fmt.Errorf("unable to get USER's home directory for cached token") - } - hash := sha1.New() - if _, err := hash.Write([]byte(key)); err != nil { - return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err) - } - - cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" - - return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil -} - -type tokenKnownFields struct { - AccessToken string `json:"accessToken,omitempty"` - ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` - - RefreshToken string `json:"refreshToken,omitempty"` - ClientID string `json:"clientId,omitempty"` - ClientSecret string `json:"clientSecret,omitempty"` -} - -type token struct { - tokenKnownFields - UnknownFields map[string]interface{} `json:"-"` -} - -func (t token) MarshalJSON() ([]byte, error) { - fields := map[string]interface{}{} - - setTokenFieldString(fields, "accessToken", t.AccessToken) - setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) - - setTokenFieldString(fields, "refreshToken", t.RefreshToken) - setTokenFieldString(fields, "clientId", t.ClientID) - setTokenFieldString(fields, "clientSecret", t.ClientSecret) - - for k, v := range t.UnknownFields { - if _, ok := fields[k]; ok { - return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) - } - fields[k] = v - } - - return json.Marshal(fields) -} - -func setTokenFieldString(fields map[string]interface{}, key, value string) { - if value == "" { - return - } - fields[key] = value -} -func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { - if value == nil { - return - } - fields[key] = value -} - -func (t *token) UnmarshalJSON(b []byte) error { - var fields map[string]interface{} - if err := json.Unmarshal(b, &fields); err != nil { - return nil - } - - t.UnknownFields = map[string]interface{}{} - - for k, v := range fields { - var err error - switch k { - case "accessToken": - err = getTokenFieldString(v, &t.AccessToken) - case "expiresAt": - err = getTokenFieldRFC3339(v, &t.ExpiresAt) - case "refreshToken": - err = getTokenFieldString(v, &t.RefreshToken) - case "clientId": - err = getTokenFieldString(v, &t.ClientID) - case "clientSecret": - err = getTokenFieldString(v, &t.ClientSecret) - default: - t.UnknownFields[k] = v - } - - if err != nil { - return fmt.Errorf("field %q, %w", k, err) - } - } - - return nil -} - -func getTokenFieldString(v interface{}, value *string) error { - var ok bool - *value, ok = v.(string) - if !ok { - return fmt.Errorf("expect value to be string, got %T", v) - } - return nil -} - -func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { - var stringValue string - if err := getTokenFieldString(v, &stringValue); err != nil { - return err - } - - timeValue, err := parseRFC3339(stringValue) - if err != nil { - return err - } - - *value = &timeValue - return nil -} - -func loadCachedToken(filename string) (token, error) { - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err) - } - - var t token - if err := json.Unmarshal(fileBytes, &t); err != nil { - return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err) - } - - if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { - return token{}, fmt.Errorf( - "cached SSO token must contain accessToken and expiresAt fields") - } - - return t, nil -} - -func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) { - tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10) - if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { - return err - } - - if err := os.Rename(tmpFilename, filename); err != nil { - return fmt.Errorf("failed to replace old cached SSO token file, %w", err) - } - - return nil -} - -func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) { - var f *os.File - f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) - if err != nil { - return fmt.Errorf("failed to create cached SSO token file %w", err) - } - - defer func() { - closeErr := f.Close() - if err == nil && closeErr != nil { - err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr) - } - }() - - encoder := json.NewEncoder(f) - - if err = encoder.Encode(t); err != nil { - return fmt.Errorf("failed to serialize cached SSO token, %w", err) - } - - return nil -} - -type rfc3339 time.Time - -func parseRFC3339(v string) (rfc3339, error) { - parsed, err := time.Parse(time.RFC3339, v) - if err != nil { - return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err) - } - - return rfc3339(parsed), nil -} - -func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { - var value string - - // Use JSON unmarshal to unescape the quoted value making use of JSON's - // unquoting rules. - if err = json.Unmarshal(bytes, &value); err != nil { - return err - } - - *r, err = parseRFC3339(value) - - return nil -} - -func (r *rfc3339) MarshalJSON() ([]byte, error) { - value := time.Time(*r).UTC().Format(time.RFC3339) - - // Use JSON unmarshal to unescape the quoted value making use of JSON's - // quoting rules. - return json.Marshal(value) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go deleted file mode 100644 index 3ed9cbb3ec07..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ /dev/null @@ -1,165 +0,0 @@ -package ssocreds - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/sso" -) - -// ProviderName is the name of the provider used to specify the source of -// credentials. -const ProviderName = "SSOProvider" - -// GetRoleCredentialsAPIClient is a API client that implements the -// GetRoleCredentials operation. -type GetRoleCredentialsAPIClient interface { - GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) ( - *sso.GetRoleCredentialsOutput, error, - ) -} - -// Options is the Provider options structure. -type Options struct { - // The Client which is configured for the AWS Region where the AWS SSO user - // portal is located. - Client GetRoleCredentialsAPIClient - - // The AWS account that is assigned to the user. - AccountID string - - // The role name that is assigned to the user. - RoleName string - - // The URL that points to the organization's AWS Single Sign-On (AWS SSO) - // user portal. - StartURL string - - // The filepath the cached token will be retrieved from. If unset Provider will - // use the startURL to determine the filepath at. - // - // ~/.aws/sso/cache/.json - // - // If custom cached token filepath is used, the Provider's startUrl - // parameter will be ignored. - CachedTokenFilepath string - - // Used by the SSOCredentialProvider if a token configuration - // profile is used in the shared config - SSOTokenProvider *SSOTokenProvider - - // The chain of providers that was used to create this provider. - // These values are for reporting purposes and are not meant to be set up directly - CredentialSources []aws.CredentialSource -} - -// Provider is an AWS credential provider that retrieves temporary AWS -// credentials by exchanging an SSO login token. -type Provider struct { - options Options - - cachedTokenFilepath string -} - -// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The -// provided client is expected to be configured for the AWS Region where the -// AWS SSO user portal is located. -func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider { - options := Options{ - Client: client, - AccountID: accountID, - RoleName: roleName, - StartURL: startURL, - } - - for _, fn := range optFns { - fn(&options) - } - - return &Provider{ - options: options, - cachedTokenFilepath: options.CachedTokenFilepath, - } -} - -// Retrieve retrieves temporary AWS credentials from the configured Amazon -// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present -// in ~/.aws/sso/cache. However, if a token provider configuration exists -// in the shared config, then we ought to use the token provider rather then -// direct access on the cached token. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - var accessToken *string - if p.options.SSOTokenProvider != nil { - token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx) - if err != nil { - return aws.Credentials{}, err - } - accessToken = &token.Value - } else { - if p.cachedTokenFilepath == "" { - cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL) - if err != nil { - return aws.Credentials{}, &InvalidTokenError{Err: err} - } - p.cachedTokenFilepath = cachedTokenFilepath - } - - tokenFile, err := loadCachedToken(p.cachedTokenFilepath) - if err != nil { - return aws.Credentials{}, &InvalidTokenError{Err: err} - } - - if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) { - return aws.Credentials{}, &InvalidTokenError{} - } - accessToken = &tokenFile.AccessToken - } - - output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: accessToken, - AccountId: &p.options.AccountID, - RoleName: &p.options.RoleName, - }) - if err != nil { - return aws.Credentials{}, err - } - - return aws.Credentials{ - AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId), - SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey), - SessionToken: aws.ToString(output.RoleCredentials.SessionToken), - CanExpire: true, - Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), - Source: ProviderName, - AccountID: p.options.AccountID, - }, nil -} - -// ProviderSources returns the credential chain that was used to construct this provider -func (p *Provider) ProviderSources() []aws.CredentialSource { - if p.options.CredentialSources == nil { - return []aws.CredentialSource{aws.CredentialSourceSSO} - } - return p.options.CredentialSources -} - -// InvalidTokenError is the error type that is returned if loaded token has -// expired or is otherwise invalid. To refresh the SSO session run AWS SSO -// login with the corresponding profile. -type InvalidTokenError struct { - Err error -} - -func (i *InvalidTokenError) Unwrap() error { - return i.Err -} - -func (i *InvalidTokenError) Error() string { - const msg = "the SSO session has expired or is invalid" - if i.Err == nil { - return msg - } - return msg + ": " + i.Err.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go deleted file mode 100644 index 7f4fc5467722..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go +++ /dev/null @@ -1,147 +0,0 @@ -package ssocreds - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - "github.com/aws/smithy-go/auth/bearer" -) - -// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API -// client for calling CreateToken operation to refresh the SSO token. -type CreateTokenAPIClient interface { - CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) ( - *ssooidc.CreateTokenOutput, error, - ) -} - -// SSOTokenProviderOptions provides the options for configuring the -// SSOTokenProvider. -type SSOTokenProviderOptions struct { - // Client that can be overridden - Client CreateTokenAPIClient - - // The set of API Client options to be applied when invoking the - // CreateToken operation. - ClientOptions []func(*ssooidc.Options) - - // The path the file containing the cached SSO token will be read from. - // Initialized the NewSSOTokenProvider's cachedTokenFilepath parameter. - CachedTokenFilepath string -} - -// SSOTokenProvider provides an utility for refreshing SSO AccessTokens for -// Bearer Authentication. The SSOTokenProvider can only be used to refresh -// already cached SSO Tokens. This utility cannot perform the initial SSO -// create token. -// -// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in -// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's -// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with -// the smithy-go TokenCache, if the external configuration loaded configured -// for an SSO session. -// -// The initial SSO create token should be preformed with the AWS CLI before the -// Go application using the SSOTokenProvider will need to retrieve the SSO -// token. If the AWS CLI has not created the token cache file, this provider -// will return an error when attempting to retrieve the cached token. -// -// This provider will attempt to refresh the cached SSO token periodically if -// needed when RetrieveBearerToken is called. -// -// A utility such as the AWS CLI must be used to initially create the SSO -// session and cached token file. -// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -type SSOTokenProvider struct { - options SSOTokenProviderOptions -} - -var _ bearer.TokenProvider = (*SSOTokenProvider)(nil) - -// NewSSOTokenProvider returns an initialized SSOTokenProvider that will -// periodically refresh the SSO token cached stored in the cachedTokenFilepath. -// The cachedTokenFilepath file's content will be rewritten by the token -// provider when the token is refreshed. -// -// The client must be configured for the AWS region the SSO token was created for. -func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { - options := SSOTokenProviderOptions{ - Client: client, - CachedTokenFilepath: cachedTokenFilepath, - } - for _, fn := range optFns { - fn(&options) - } - - provider := &SSOTokenProvider{ - options: options, - } - - return provider -} - -// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath -// the SSOTokenProvider was created with. If the token has expired -// RetrieveBearerToken will attempt to refresh it. If the token cannot be -// refreshed or is not present an error will be returned. -// -// A utility such as the AWS CLI must be used to initially create the SSO -// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) { - cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) - if err != nil { - return bearer.Token{}, err - } - - if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) { - cachedToken, err = p.refreshToken(ctx, cachedToken) - if err != nil { - return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err) - } - } - - expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt)) - return bearer.Token{ - Value: cachedToken.AccessToken, - CanExpire: !expiresAt.IsZero(), - Expires: expiresAt, - }, nil -} - -func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) { - if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" { - return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") - } - - createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{ - ClientId: &cachedToken.ClientID, - ClientSecret: &cachedToken.ClientSecret, - RefreshToken: &cachedToken.RefreshToken, - GrantType: aws.String("refresh_token"), - }, p.options.ClientOptions...) - if err != nil { - return token{}, fmt.Errorf("unable to refresh SSO token, %w", err) - } - - expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second) - - cachedToken.AccessToken = aws.ToString(createResult.AccessToken) - cachedToken.ExpiresAt = (*rfc3339)(&expiresAt) - cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken) - - fileInfo, err := os.Stat(p.options.CachedTokenFilepath) - if err != nil { - return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err) - } - - if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil { - return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err) - } - - return cachedToken, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go deleted file mode 100644 index a469abdb7909..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go +++ /dev/null @@ -1,63 +0,0 @@ -package credentials - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -const ( - // StaticCredentialsName provides a name of Static provider - StaticCredentialsName = "StaticCredentials" -) - -// StaticCredentialsEmptyError is emitted when static credentials are empty. -type StaticCredentialsEmptyError struct{} - -func (*StaticCredentialsEmptyError) Error() string { - return "static credentials are empty" -} - -// A StaticCredentialsProvider is a set of credentials which are set, and will -// never expire. -type StaticCredentialsProvider struct { - Value aws.Credentials - // These values are for reporting purposes and are not meant to be set up directly - Source []aws.CredentialSource -} - -// ProviderSources returns the credential chain that was used to construct this provider -func (s StaticCredentialsProvider) ProviderSources() []aws.CredentialSource { - if s.Source == nil { - return []aws.CredentialSource{aws.CredentialSourceCode} // If no source has been set, assume this is used directly which means hardcoded creds - } - return s.Source -} - -// NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS -// credentials passed in. -func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider { - return StaticCredentialsProvider{ - Value: aws.Credentials{ - AccessKeyID: key, - SecretAccessKey: secret, - SessionToken: session, - }, - } -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) { - v := s.Value - if v.AccessKeyID == "" || v.SecretAccessKey == "" { - return aws.Credentials{ - Source: StaticCredentialsName, - }, &StaticCredentialsEmptyError{} - } - - if len(v.Source) == 0 { - v.Source = StaticCredentialsName - } - - return v, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 1ccf71e77e69..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,338 +0,0 @@ -// Package stscreds are credential Providers to retrieve STS AWS credentials. -// -// STS provides multiple ways to retrieve credentials which can be used when making -// future AWS service API operation calls. -// -// The SDK will ensure that per instance of credentials.Credentials all requests -// to refresh the credentials will be synchronized. But, the SDK is unable to -// ensure synchronous usage of the AssumeRoleProvider if the value is shared -// between multiple Credentials or service clients. -// -// # Assume Role -// -// To assume an IAM role using STS with the SDK you can create a new Credentials -// with the SDKs's stscreds package. -// -// // Initial credentials loaded from SDK's default credential chain. Such as -// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance -// // Role. These credentials will be used to to make the STS Assume Role API. -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN. -// stsSvc := sts.NewFromConfig(cfg) -// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn") -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) -// -// # Assume Role with custom MFA Token provider -// -// To assume an IAM role with a MFA token you can either specify a custom MFA -// token provider or use the SDK's built in StdinTokenProvider that will prompt -// the user for a token code each time the credentials need to to be refreshed. -// Specifying a custom token provider allows you to control where the token -// code is retrieved from, and how it is refreshed. -// -// With a custom token provider, the provider is responsible for refreshing the -// token code when called. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// staticTokenProvider := func() (string, error) { -// return someTokenCode, nil -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = staticTokenProvider -// }) -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) -// -// # Assume Role with MFA Token Provider -// -// To assume an IAM role with MFA for longer running tasks where the credentials -// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -// will allow the credential provider to prompt for new MFA token code when the -// role's credentials need to be refreshed. -// -// The StdinTokenProvider function is available to prompt on stdin to retrieve -// the MFA token code from the user. You can also implement custom prompts by -// satisfying the TokenProvider function signature. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = stscreds.StdinTokenProvider -// }) -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) -package stscreds - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go-v2/service/sts/types" -) - -// StdinTokenProvider will prompt on stdout and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Printf("Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation. -type AssumeRoleAPIClient interface { - AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the -// credentials will be valid for. This value is only used by AssumeRoleProvider -// for specifying the default expiry duration of an assume role. -// -// Other providers such as WebIdentityRoleProvider do not use this value, and -// instead rely on STS API's default parameter handing to assign a default -// value. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. -// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. -// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - options AssumeRoleOptions -} - -// AssumeRoleOptions is the configurable options for AssumeRoleProvider -type AssumeRoleOptions struct { - // Client implementation of the AssumeRole operation. Required - Client AssumeRoleAPIClient - - // IAM Role ARN to be assumed. Required - RoleARN string - - // Session name, if you wish to uniquely identify this session. - RoleSessionName string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The ARNs of IAM managed policies you want to use as managed session policies. - // The policies must exist in the same account as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyARNs []types.PolicyDescriptorType - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - SerialNumber *string - - // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - SourceIdentity *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is set. - TokenProvider func() (string, error) - - // A list of session tags that you want to pass. Each session tag consists of a key - // name and an associated value. For more information about session tags, see - // Tagging STS Sessions - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the - // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. - Tags []types.Tag - - // A list of keys for session tags that you want to set as transitive. If you set a - // tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. - TransitiveTagKeys []string - - // The chain of providers that was used to create this provider - // These values are for reporting purposes and are not meant to be set up directly - CredentialSources []aws.CredentialSource -} - -// NewAssumeRoleProvider constructs and returns a credentials provider that -// will retrieve credentials by assuming a IAM role using STS. -func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider { - o := AssumeRoleOptions{ - Client: client, - RoleARN: roleARN, - } - - for _, fn := range optFns { - fn(&o) - } - - return &AssumeRoleProvider{ - options: o, - } -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { - // Apply defaults where parameters are not set. - if len(p.options.RoleSessionName) == 0 { - // Try to work out a role name that will hopefully end up unique. - p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano()) - } - if p.options.Duration == 0 { - // Expire as often as AWS permits. - p.options.Duration = DefaultDuration - } - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)), - PolicyArns: p.options.PolicyARNs, - RoleArn: aws.String(p.options.RoleARN), - RoleSessionName: aws.String(p.options.RoleSessionName), - ExternalId: p.options.ExternalID, - SourceIdentity: p.options.SourceIdentity, - Tags: p.options.Tags, - TransitiveTagKeys: p.options.TransitiveTagKeys, - } - if p.options.Policy != nil { - input.Policy = p.options.Policy - } - if p.options.SerialNumber != nil { - if p.options.TokenProvider != nil { - input.SerialNumber = p.options.SerialNumber - code, err := p.options.TokenProvider() - if err != nil { - return aws.Credentials{}, err - } - input.TokenCode = aws.String(code) - } else { - return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set") - } - } - - resp, err := p.options.Client.AssumeRole(ctx, input) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - var accountID string - if resp.AssumedRoleUser != nil { - accountID = getAccountID(resp.AssumedRoleUser) - } - - return aws.Credentials{ - AccessKeyID: *resp.Credentials.AccessKeyId, - SecretAccessKey: *resp.Credentials.SecretAccessKey, - SessionToken: *resp.Credentials.SessionToken, - Source: ProviderName, - - CanExpire: true, - Expires: *resp.Credentials.Expiration, - AccountID: accountID, - }, nil -} - -// ProviderSources returns the credential chain that was used to construct this provider -func (p *AssumeRoleProvider) ProviderSources() []aws.CredentialSource { - if p.options.CredentialSources == nil { - return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRole} - } // If no source has been set, assume this is used directly which means just call to assume role - return append(p.options.CredentialSources, aws.CredentialSourceSTSAssumeRole) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index 5f4286dda46d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,181 +0,0 @@ -package stscreds - -import ( - "context" - "fmt" - "io/ioutil" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go-v2/service/sts/types" -) - -var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode() - -const ( - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation. -type AssumeRoleWithWebIdentityAPIClient interface { - AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) -} - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - options WebIdentityRoleOptions -} - -// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider -type WebIdentityRoleOptions struct { - // Client implementation of the AssumeRoleWithWebIdentity operation. Required - Client AssumeRoleWithWebIdentityAPIClient - - // JWT Token Provider. Required - TokenRetriever IdentityTokenRetriever - - // IAM Role ARN to assume. Required - RoleARN string - - // Session name, if you wish to uniquely identify this session. - RoleSessionName string - - // Expiry duration of the STS credentials. STS will assign a default expiry - // duration if this value is unset. This is different from the Duration - // option of AssumeRoleProvider, which automatically assigns 15 minutes if - // Duration is unset. - // - // See the STS AssumeRoleWithWebIdentity API reference guide for more - // information on defaults. - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - Duration time.Duration - - // An IAM policy in JSON format that you want to use as an inline session policy. - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you - // want to use as managed session policies. The policies must exist in the - // same account as the role. - PolicyARNs []types.PolicyDescriptorType - - // The chain of providers that was used to create this provider - // These values are for reporting purposes and are not meant to be set up directly - CredentialSources []aws.CredentialSource -} - -// IdentityTokenRetriever is an interface for retrieving a JWT -type IdentityTokenRetriever interface { - GetIdentityToken() ([]byte, error) -} - -// IdentityTokenFile is for retrieving an identity token from the given file name -type IdentityTokenFile string - -// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte -func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) { - b, err := ioutil.ReadFile(string(j)) - if err != nil { - return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err) - } - - return b, nil -} - -// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the -// provided stsiface.ClientAPI -func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider { - o := WebIdentityRoleOptions{ - Client: client, - RoleARN: roleARN, - TokenRetriever: tokenRetriever, - } - - for _, fn := range optFns { - fn(&o) - } - - return &WebIdentityRoleProvider{options: o} -} - -// Retrieve will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { - b, err := p.options.TokenRetriever.GetIdentityToken() - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provide source, %w", err) - } - - sessionName := p.options.RoleSessionName - if len(sessionName) == 0 { - // session name is used to uniquely identify a session. This simply - // uses unix time in nanoseconds to uniquely identify sessions. - sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10) - } - input := &sts.AssumeRoleWithWebIdentityInput{ - PolicyArns: p.options.PolicyARNs, - RoleArn: &p.options.RoleARN, - RoleSessionName: &sessionName, - WebIdentityToken: aws.String(string(b)), - } - if p.options.Duration != 0 { - // If set use the value, otherwise STS will assign a default expiration duration. - input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second)) - } - if p.options.Policy != nil { - input.Policy = p.options.Policy - } - - resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) { - options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode) - }) - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) - } - - var accountID string - if resp.AssumedRoleUser != nil { - accountID = getAccountID(resp.AssumedRoleUser) - } - - // InvalidIdentityToken error is a temporary error that can occur - // when assuming an Role with a JWT web identity token. - - value := aws.Credentials{ - AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId), - SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey), - SessionToken: aws.ToString(resp.Credentials.SessionToken), - Source: WebIdentityProviderName, - CanExpire: true, - Expires: *resp.Credentials.Expiration, - AccountID: accountID, - } - return value, nil -} - -// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]" -func getAccountID(u *types.AssumedRoleUser) string { - if u.Arn == nil { - return "" - } - parts := strings.Split(*u.Arn, ":") - if len(parts) < 5 { - return "" - } - return parts[4] -} - -// ProviderSources returns the credential chain that was used to construct this provider -func (p *WebIdentityRoleProvider) ProviderSources() []aws.CredentialSource { - if p.options.CredentialSources == nil { - return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRoleWebID} - } - return p.options.CredentialSources -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md deleted file mode 100644 index dcda88e2067e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ /dev/null @@ -1,473 +0,0 @@ -# v1.18.4 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.2 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2025-07-29) - -* **Feature**: Add config switch `DisableDefaultMaxBackoff` that allows you to disable the default maximum backoff (1 second) for IMDS calls retry attempt - -# v1.17.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.33 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.32 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.31 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.30 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.29 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.28 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.27 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.26 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.25 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.16.24 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.23 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.22 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.21 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.20 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.19 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.18 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.17 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.14 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2024-03-21) - -* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls. - -# v1.15.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.11 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.10 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.9 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.8 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.7 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.3 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-11-02) - -* No change notes available for this release. - -# v1.14.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.10 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2023-03-14) - -* **Feature**: Add flag to disable IMDSv1 fallback - -# v1.12.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.19 (2022-10-24) - -* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-11) - -* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout. -* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. Fixes #1253 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-07-15) - -* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go deleted file mode 100644 index 75edc4e9d63d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ /dev/null @@ -1,358 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ServiceID provides the unique name of this API client -const ServiceID = "ec2imds" - -// Client provides the API client for interacting with the Amazon EC2 Instance -// Metadata Service API. -type Client struct { - options Options -} - -// ClientEnableState provides an enumeration if the client is enabled, -// disabled, or default behavior. -type ClientEnableState = internalconfig.ClientEnableState - -// Enumeration values for ClientEnableState -const ( - ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior - ClientDisabled ClientEnableState = internalconfig.ClientDisabled // client disabled - ClientEnabled ClientEnableState = internalconfig.ClientEnabled // client enabled -) - -// EndpointModeState is an enum configuration variable describing the client endpoint mode. -// Not configurable directly, but used when using the NewFromConfig. -type EndpointModeState = internalconfig.EndpointModeState - -// Enumeration values for EndpointModeState -const ( - EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset - EndpointModeStateIPv4 EndpointModeState = internalconfig.EndpointModeStateIPv4 - EndpointModeStateIPv6 EndpointModeState = internalconfig.EndpointModeStateIPv6 -) - -const ( - disableClientEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Client endpoint options - endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - - defaultIPv4Endpoint = "http://169.254.169.254" - defaultIPv6Endpoint = "http://[fd00:ec2::254]" -) - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - options.HTTPClient = resolveHTTPClient(options.HTTPClient) - - if options.Retryer == nil { - options.Retryer = retry.NewStandard() - } - if !options.DisableDefaultMaxBackoff { - options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second) - } - - if options.ClientEnableState == ClientDefaultEnableState { - if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") { - options.ClientEnableState = ClientDisabled - } - } - - if len(options.Endpoint) == 0 { - if v := os.Getenv(endpointEnvVar); len(v) != 0 { - options.Endpoint = v - } - } - - client := &Client{ - options: options, - } - - if client.options.tokenProvider == nil && !client.options.disableAPIToken { - client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL) - } - - return client -} - -// NewFromConfig returns an initialized Client based the AWS SDK config, and -// functional options. Provide additional functional options to further -// configure the behavior of the client, such as changing the client's endpoint -// or adding custom middleware behavior. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...), - HTTPClient: cfg.HTTPClient, - ClientLogMode: cfg.ClientLogMode, - Logger: cfg.Logger, - } - - if cfg.Retryer != nil { - opts.Retryer = cfg.Retryer() - } - - resolveClientEnableState(cfg, &opts) - resolveEndpointConfig(cfg, &opts) - resolveEndpointModeConfig(cfg, &opts) - resolveEnableFallback(cfg, &opts) - - return New(opts, optFns...) -} - -// Options provides the fields for configuring the API client's behavior. -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation - // call to modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The endpoint the client will use to retrieve EC2 instance metadata. - // - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EndpointMode. - // - // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT - // has a value the client will use the value of the environment variable as - // the endpoint for operation calls. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - Endpoint string - - // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field. - // - // Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint. - // Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint. - // - // By default if EndpointMode is not set (EndpointModeStateUnset) than the default endpoint selection mode EndpointModeStateIPv4. - EndpointMode EndpointModeState - - // The HTTP client to invoke API calls with. Defaults to client's default - // HTTP implementation if nil. - HTTPClient HTTPClient - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - Retryer aws.Retryer - - // Changes if the EC2 Instance Metadata client is enabled or not. Client - // will default to enabled if not set to ClientDisabled. When the client is - // disabled it will return an error for all operation calls. - // - // If ClientEnableState value is ClientDefaultEnableState (default value), - // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to - // "true", the client will be disabled. - // - // AWS_EC2_METADATA_DISABLED=true - ClientEnableState ClientEnableState - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // Configure IMDSv1 fallback behavior. By default, the client will attempt - // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary] - // the client will return any errors encountered from attempting to fetch a token - // instead of silently using the insecure data flow of IMDSv1. - // - // See [configuring IMDS] for more information. - // - // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - EnableFallback aws.Ternary - - // By default, all IMDS client operations enforce a 5-second timeout. You - // can disable that behavior with this setting. - DisableDefaultTimeout bool - - // By default all IMDS client operations enforce a 1-second retry delay at maximum. - // You can disable that behavior with this setting. - DisableDefaultMaxBackoff bool - - // provides the caching of API tokens used for operation calls. If unset, - // the API token will not be retrieved for the operation. - tokenProvider *tokenProvider - - // option to disable the API token provider for testing. - disableAPIToken bool -} - -// HTTPClient provides the interface for a client making HTTP requests with the -// API. -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Copy creates a copy of the API options. -func (o Options) Copy() Options { - to := o - to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...) - return to -} - -// WithAPIOptions wraps the API middleware functions, as a functional option -// for the API Client Options. Use this helper to add additional functional -// options to the API client, or operation calls. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), - stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - for _, fn := range optFns { - fn(&options) - } - - if options.ClientEnableState == ClientDisabled { - return nil, metadata, &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: fmt.Errorf( - "access disabled to EC2 IMDS via client option, or %q environment variable", - disableClientEnvVar), - } - } - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - return nil, metadata, &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - return result, metadata, err -} - -const ( - // HTTP client constants - defaultDialerTimeout = 250 * time.Millisecond - defaultResponseHeaderTimeout = 500 * time.Millisecond -) - -func resolveHTTPClient(client HTTPClient) HTTPClient { - if client == nil { - client = awshttp.NewBuildableClient() - } - - if c, ok := client.(*awshttp.BuildableClient); ok { - client = c. - WithDialerOptions(func(d *net.Dialer) { - // Use a custom Dial timeout for the EC2 Metadata service to account - // for the possibility the application might not be running in an - // environment with the service present. The client should fail fast in - // this case. - d.Timeout = defaultDialerTimeout - }). - WithTransportOptions(func(tr *http.Transport) { - // Use a custom Transport timeout for the EC2 Metadata service to - // account for the possibility that the application might be running in - // a container, and EC2Metadata service drops the connection after a - // single IP Hop. The client should fail fast in this case. - tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout - }) - } - - return client -} - -func resolveClientEnableState(cfg aws.Config, options *Options) error { - if options.ClientEnableState != ClientDefaultEnableState { - return nil - } - value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.ClientEnableState = value - return nil -} - -func resolveEndpointModeConfig(cfg aws.Config, options *Options) error { - if options.EndpointMode != EndpointModeStateUnset { - return nil - } - value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.EndpointMode = value - return nil -} - -func resolveEndpointConfig(cfg aws.Config, options *Options) error { - if len(options.Endpoint) != 0 { - return nil - } - value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.Endpoint = value - return nil -} - -func resolveEnableFallback(cfg aws.Config, options *Options) { - if options.EnableFallback != aws.UnknownTernary { - return - } - - disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources) - if !ok { - return - } - - if disabled { - options.EnableFallback = aws.FalseTernary - } else { - options.EnableFallback = aws.TrueTernary - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go deleted file mode 100644 index af58b6bb1029..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go +++ /dev/null @@ -1,77 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getDynamicDataPath = "/latest/dynamic" - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) { - if params == nil { - params = &GetDynamicDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns, - addGetDynamicDataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetDynamicDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetDynamicDataInput provides the input parameters for the GetDynamicData -// operation. -type GetDynamicDataInput struct { - // The relative dynamic data path to retrieve. Can be empty string to - // retrieve a response containing a new line separated list of dynamic data - // resources available. - // - // Must not include the dynamic data base path. - // - // May include leading slash. If Path includes trailing slash the trailing - // slash will be included in the request for the resource. - Path string -} - -// GetDynamicDataOutput provides the output parameters for the GetDynamicData -// operation. -type GetDynamicDataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetDynamicData", - buildGetDynamicDataPath, - buildGetDynamicDataOutput) -} - -func buildGetDynamicDataPath(params interface{}) (string, error) { - p, ok := params.(*GetDynamicDataInput) - if !ok { - return "", fmt.Errorf("unknown parameter type %T", params) - } - - return appendURIPath(getDynamicDataPath, p.Path), nil -} - -func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetDynamicDataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go deleted file mode 100644 index 5111cc90cac4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go +++ /dev/null @@ -1,103 +0,0 @@ -package imds - -import ( - "context" - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getIAMInfoPath = getMetadataPath + "/iam/info" - -// GetIAMInfo retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *Client) GetIAMInfo( - ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options), -) ( - *GetIAMInfoOutput, error, -) { - if params == nil { - params = &GetIAMInfoInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns, - addGetIAMInfoMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetIAMInfoOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetIAMInfoInput provides the input parameters for GetIAMInfo operation. -type GetIAMInfoInput struct{} - -// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation. -type GetIAMInfoOutput struct { - IAMInfo - - ResultMetadata middleware.Metadata -} - -func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetIAMInfo", - buildGetIAMInfoPath, - buildGetIAMInfoOutput, - ) -} - -func buildGetIAMInfoPath(params interface{}) (string, error) { - return getIAMInfoPath, nil -} - -func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(resp.Body, ringBuffer) - - imdsResult := &GetIAMInfoOutput{} - if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil { - return nil, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode instance identity document, %w", err), - Snapshot: ringBuffer.Bytes(), - } - } - // Any code other success is an error - if !strings.EqualFold(imdsResult.Code, "success") { - return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s", - imdsResult.Code) - } - - return imdsResult, nil -} - -// IAMInfo provides the shape for unmarshaling an IAM info from the metadata -// API. -type IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go deleted file mode 100644 index dc8c09edf034..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go +++ /dev/null @@ -1,110 +0,0 @@ -package imds - -import ( - "context" - "encoding/json" - "fmt" - "io" - "time" - - "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document" - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *Client) GetInstanceIdentityDocument( - ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options), -) ( - *GetInstanceIdentityDocumentOutput, error, -) { - if params == nil { - params = &GetInstanceIdentityDocumentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns, - addGetInstanceIdentityDocumentMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetInstanceIdentityDocumentOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetInstanceIdentityDocumentInput provides the input parameters for -// GetInstanceIdentityDocument operation. -type GetInstanceIdentityDocumentInput struct{} - -// GetInstanceIdentityDocumentOutput provides the output parameters for -// GetInstanceIdentityDocument operation. -type GetInstanceIdentityDocumentOutput struct { - InstanceIdentityDocument - - ResultMetadata middleware.Metadata -} - -func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetInstanceIdentityDocument", - buildGetInstanceIdentityDocumentPath, - buildGetInstanceIdentityDocumentOutput, - ) -} - -func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { - return getInstanceIdentityDocumentPath, nil -} - -func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(resp.Body, ringBuffer) - - output := &GetInstanceIdentityDocumentOutput{} - if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil { - return nil, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode instance identity document, %w", err), - Snapshot: ringBuffer.Bytes(), - } - } - - return output, nil -} - -// InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go deleted file mode 100644 index 869bfc9feb9d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go +++ /dev/null @@ -1,77 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getMetadataPath = "/latest/meta-data" - -// GetMetadata uses the path provided to request information from the Amazon -// EC2 Instance Metadata Service. The content will be returned as a string, or -// error if the request failed. -func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) { - if params == nil { - params = &GetMetadataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns, - addGetMetadataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetMetadataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetMetadataInput provides the input parameters for the GetMetadata -// operation. -type GetMetadataInput struct { - // The relative metadata path to retrieve. Can be empty string to retrieve - // a response containing a new line separated list of metadata resources - // available. - // - // Must not include the metadata base path. - // - // May include leading slash. If Path includes trailing slash the trailing slash - // will be included in the request for the resource. - Path string -} - -// GetMetadataOutput provides the output parameters for the GetMetadata -// operation. -type GetMetadataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetMetadata", - buildGetMetadataPath, - buildGetMetadataOutput) -} - -func buildGetMetadataPath(params interface{}) (string, error) { - p, ok := params.(*GetMetadataInput) - if !ok { - return "", fmt.Errorf("unknown parameter type %T", params) - } - - return appendURIPath(getMetadataPath, p.Path), nil -} - -func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetMetadataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go deleted file mode 100644 index 8c0572bb5c89..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go +++ /dev/null @@ -1,73 +0,0 @@ -package imds - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// GetRegion retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *Client) GetRegion( - ctx context.Context, params *GetRegionInput, optFns ...func(*Options), -) ( - *GetRegionOutput, error, -) { - if params == nil { - params = &GetRegionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns, - addGetRegionMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetRegionOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetRegionInput provides the input parameters for GetRegion operation. -type GetRegionInput struct{} - -// GetRegionOutput provides the output parameters for GetRegion operation. -type GetRegionOutput struct { - Region string - - ResultMetadata middleware.Metadata -} - -func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetRegion", - buildGetInstanceIdentityDocumentPath, - buildGetRegionOutput, - ) -} - -func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { - out, err := buildGetInstanceIdentityDocumentOutput(resp) - if err != nil { - return nil, err - } - - result, ok := out.(*GetInstanceIdentityDocumentOutput) - if !ok { - return nil, fmt.Errorf("unexpected instance identity document type, %T", out) - } - - region := result.Region - if len(region) == 0 { - return "", fmt.Errorf("instance metadata did not return a region value") - } - - return &GetRegionOutput{ - Region: region, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go deleted file mode 100644 index 1f9ee97a5b71..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go +++ /dev/null @@ -1,119 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - "strconv" - "strings" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getTokenPath = "/latest/api/token" -const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" - -// getToken uses the duration to return a token for EC2 IMDS, or an error if -// the request failed. -func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { - if params == nil { - params = &getTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, - addGetTokenMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*getTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type getTokenInput struct { - TokenTTL time.Duration -} - -type getTokenOutput struct { - Token string - TokenTTL time.Duration - - ResultMetadata middleware.Metadata -} - -func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { - err := addRequestMiddleware(stack, - options, - "PUT", - "GetToken", - buildGetTokenPath, - buildGetTokenOutput) - if err != nil { - return err - } - - err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) - if err != nil { - return err - } - - return nil -} - -func buildGetTokenPath(interface{}) (string, error) { - return getTokenPath, nil -} - -func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - ttlHeader := resp.Header.Get(tokenTTLHeader) - tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse API token, %w", err) - } - - var token strings.Builder - if _, err = io.Copy(&token, resp.Body); err != nil { - return nil, fmt.Errorf("unable to read API token, %w", err) - } - - return &getTokenOutput{ - Token: token.String(), - TokenTTL: time.Duration(tokenTTL) * time.Second, - }, nil -} - -type tokenTTLRequestHeader struct{} - -func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } -func (*tokenTTLRequestHeader) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request) - } - - input, ok := in.Parameters.(*getTokenInput) - if !ok { - return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters) - } - - req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second))) - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go deleted file mode 100644 index 8903697244ae..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go +++ /dev/null @@ -1,61 +0,0 @@ -package imds - -import ( - "context" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getUserDataPath = "/latest/user-data" - -// GetUserData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) { - if params == nil { - params = &GetUserDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns, - addGetUserDataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetUserDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetUserDataInput provides the input parameters for the GetUserData -// operation. -type GetUserDataInput struct{} - -// GetUserDataOutput provides the output parameters for the GetUserData -// operation. -type GetUserDataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetUserData", - buildGetUserDataPath, - buildGetUserDataOutput) -} - -func buildGetUserDataPath(params interface{}) (string, error) { - return getUserDataPath, nil -} - -func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetUserDataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go deleted file mode 100644 index ad283cf825f4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go +++ /dev/null @@ -1,48 +0,0 @@ -package imds - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go deleted file mode 100644 index d5765c36b170..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package imds provides the API client for interacting with the Amazon EC2 -// Instance Metadata Service. -// -// All Client operation calls have a default timeout. If the operation is not -// completed before this timeout expires, the operation will be canceled. This -// timeout can be overridden through the following: -// - Set the options flag DisableDefaultTimeout -// - Provide a Context with a timeout or deadline with calling the client's operations. -// -// See the EC2 IMDS user guide for more information on using the API. -// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html -package imds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go deleted file mode 100644 index d7540da3481e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go +++ /dev/null @@ -1,20 +0,0 @@ -package imds - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go deleted file mode 100644 index 48db67d7e2d4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package imds - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go deleted file mode 100644 index ce7745589325..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go +++ /dev/null @@ -1,114 +0,0 @@ -package config - -import ( - "fmt" - "strings" -) - -// ClientEnableState provides an enumeration if the client is enabled, -// disabled, or default behavior. -type ClientEnableState uint - -// Enumeration values for ClientEnableState -const ( - ClientDefaultEnableState ClientEnableState = iota - ClientDisabled - ClientEnabled -) - -// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode -type EndpointModeState uint - -// Enumeration values for ClientEnableState -const ( - EndpointModeStateUnset EndpointModeState = iota - EndpointModeStateIPv4 - EndpointModeStateIPv6 -) - -// SetFromString sets the EndpointModeState based on the provided string value. Unknown values will default to EndpointModeStateUnset -func (e *EndpointModeState) SetFromString(v string) error { - v = strings.TrimSpace(v) - - switch { - case len(v) == 0: - *e = EndpointModeStateUnset - case strings.EqualFold(v, "IPv6"): - *e = EndpointModeStateIPv6 - case strings.EqualFold(v, "IPv4"): - *e = EndpointModeStateIPv4 - default: - return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") - } - return nil -} - -// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled. -type ClientEnableStateResolver interface { - GetEC2IMDSClientEnableState() (ClientEnableState, bool, error) -} - -// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration. -type EndpointModeResolver interface { - GetEC2IMDSEndpointMode() (EndpointModeState, bool, error) -} - -// EndpointResolver is a config resolver interface for retrieving the endpoint. -type EndpointResolver interface { - GetEC2IMDSEndpoint() (string, bool, error) -} - -type v1FallbackDisabledResolver interface { - GetEC2IMDSV1FallbackDisabled() (bool, bool) -} - -// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources. -func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(ClientEnableStateResolver); ok { - value, found, err = resolver.GetEC2IMDSClientEnableState() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources. -func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(EndpointModeResolver); ok { - value, found, err = resolver.GetEC2IMDSEndpointMode() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveEndpointConfig resolves the endpoint from a list of configuration sources. -func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(EndpointResolver); ok { - value, found, err = resolver.GetEC2IMDSEndpoint() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveV1FallbackDisabled ... -func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) { - for _, source := range sources { - if resolver, ok := source.(v1FallbackDisabledResolver); ok { - if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found { - return v, true - } - } - } - return false, false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go deleted file mode 100644 index 90cf4aeb3df3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go +++ /dev/null @@ -1,313 +0,0 @@ -package imds - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "net/url" - "path" - "time" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func addAPIRequestMiddleware(stack *middleware.Stack, - options Options, - operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), -) (err error) { - err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput) - if err != nil { - return err - } - - // Token Serializer build and state management. - if !options.disableAPIToken { - err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) - if err != nil { - return err - } - - err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) - if err != nil { - return err - } - } - - return nil -} - -func addRequestMiddleware(stack *middleware.Stack, - options Options, - method string, - operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), -) (err error) { - err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) - if err != nil { - return err - } - - // Operation timeout - err = stack.Initialize.Add(&operationTimeout{ - Disabled: options.DisableDefaultTimeout, - DefaultTimeout: defaultOperationTimeout, - }, middleware.Before) - if err != nil { - return err - } - - // Operation Serializer - err = stack.Serialize.Add(&serializeRequest{ - GetPath: getPath, - Method: method, - }, middleware.After) - if err != nil { - return err - } - - // Operation endpoint resolver - err = stack.Serialize.Insert(&resolveEndpoint{ - Endpoint: options.Endpoint, - EndpointMode: options.EndpointMode, - }, "OperationSerializer", middleware.Before) - if err != nil { - return err - } - - // Operation Deserializer - err = stack.Deserialize.Add(&deserializeResponse{ - GetOutput: getOutput, - }, middleware.After) - if err != nil { - return err - } - - err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: options.ClientLogMode.IsRequest(), - LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(), - LogResponse: options.ClientLogMode.IsResponse(), - LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(), - }, middleware.After) - if err != nil { - return err - } - - err = addSetLoggerMiddleware(stack, options) - if err != nil { - return err - } - - if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil { - return fmt.Errorf("add protocol finalizers: %w", err) - } - - // Retry support - return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ - Retryer: options.Retryer, - LogRetryAttempts: options.ClientLogMode.IsRetries(), - }) -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -type serializeRequest struct { - GetPath func(interface{}) (string, error) - Method string -} - -func (*serializeRequest) ID() string { - return "OperationSerializer" -} - -func (m *serializeRequest) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - reqPath, err := m.GetPath(in.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("unable to get request URL path, %w", err) - } - - request.Request.URL.Path = reqPath - request.Request.Method = m.Method - - return next.HandleSerialize(ctx, in) -} - -type deserializeResponse struct { - GetOutput func(*smithyhttp.Response) (interface{}, error) -} - -func (*deserializeResponse) ID() string { - return "OperationDeserializer" -} - -func (m *deserializeResponse) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf( - "unexpected transport response type, %T, want %T", out.RawResponse, resp) - } - defer resp.Body.Close() - - // read the full body so that any operation timeouts cleanup will not race - // the body being read. - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return out, metadata, fmt.Errorf("read response body failed, %w", err) - } - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - - // Anything that's not 200 |< 300 is error - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return out, metadata, &smithyhttp.ResponseError{ - Response: resp, - Err: fmt.Errorf("request to EC2 IMDS failed"), - } - } - - result, err := m.GetOutput(resp) - if err != nil { - return out, metadata, fmt.Errorf( - "unable to get deserialized result for response, %w", err, - ) - } - out.Result = result - - return out, metadata, err -} - -type resolveEndpoint struct { - Endpoint string - EndpointMode EndpointModeState -} - -func (*resolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *resolveEndpoint) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - var endpoint string - if len(m.Endpoint) > 0 { - endpoint = m.Endpoint - } else { - switch m.EndpointMode { - case EndpointModeStateIPv6: - endpoint = defaultIPv6Endpoint - case EndpointModeStateIPv4: - fallthrough - case EndpointModeStateUnset: - endpoint = defaultIPv4Endpoint - default: - return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode") - } - } - - req.URL, err = url.Parse(endpoint) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - return next.HandleSerialize(ctx, in) -} - -const ( - defaultOperationTimeout = 5 * time.Second -) - -// operationTimeout adds a timeout on the middleware stack if the Context the -// stack was called with does not have a deadline. The next middleware must -// complete before the timeout, or the context will be canceled. -// -// If DefaultTimeout is zero, no default timeout will be used if the Context -// does not have a timeout. -// -// The next middleware must also ensure that any resources that are also -// canceled by the stack's context are completely consumed before returning. -// Otherwise the timeout cleanup will race the resource being consumed -// upstream. -type operationTimeout struct { - Disabled bool - DefaultTimeout time.Duration -} - -func (*operationTimeout) ID() string { return "OperationTimeout" } - -func (m *operationTimeout) HandleInitialize( - ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, -) ( - output middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.Disabled { - return next.HandleInitialize(ctx, input) - } - - if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 { - var cancelFn func() - ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout) - defer cancelFn() - } - - return next.HandleInitialize(ctx, input) -} - -// appendURIPath joins a URI path component to the existing path with `/` -// separators between the path components. If the path being added ends with a -// trailing `/` that slash will be maintained. -func appendURIPath(base, add string) string { - reqPath := path.Join(base, add) - if len(add) != 0 && add[len(add)-1] == '/' { - reqPath += "/" - } - return reqPath -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %w", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %w", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go deleted file mode 100644 index 5703c6e16add..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go +++ /dev/null @@ -1,261 +0,0 @@ -package imds - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "net/http" - "sync" - "sync/atomic" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const ( - // Headers for Token and TTL - tokenHeader = "x-aws-ec2-metadata-token" - defaultTokenTTL = 5 * time.Minute -) - -type tokenProvider struct { - client *Client - tokenTTL time.Duration - - token *apiToken - tokenMux sync.RWMutex - - disabled uint32 // Atomic updated -} - -func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider { - return &tokenProvider{ - client: client, - tokenTTL: ttl, - } -} - -// apiToken provides the API token used by all operation calls for th EC2 -// Instance metadata service. -type apiToken struct { - token string - expires time.Time -} - -var timeNow = time.Now - -// Expired returns if the token is expired. -func (t *apiToken) Expired() bool { - // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry - // time is always based on reported wall-clock time. - return timeNow().Round(0).After(t.expires) -} - -func (t *tokenProvider) ID() string { return "APITokenProvider" } - -// HandleFinalize is the finalize stack middleware, that if the token provider is -// enabled, will attempt to add the cached API token to the request. If the API -// token is not cached, it will be retrieved in a separate API call, getToken. -// -// For retry attempts, handler must be added after attempt retryer. -// -// If request for getToken fails the token provider may be disabled from future -// requests, depending on the response status code. -func (t *tokenProvider) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if t.fallbackEnabled() && !t.enabled() { - // short-circuits to insecure data flow if token provider is disabled. - return next.HandleFinalize(ctx, input) - } - - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request) - } - - tok, err := t.getToken(ctx) - if err != nil { - // If the error allows the token to downgrade to insecure flow allow that. - var bypassErr *bypassTokenRetrievalError - if errors.As(err, &bypassErr) { - return next.HandleFinalize(ctx, input) - } - - return out, metadata, fmt.Errorf("failed to get API token, %w", err) - } - - req.Header.Set(tokenHeader, tok.token) - - return next.HandleFinalize(ctx, input) -} - -// HandleDeserialize is the deserialize stack middleware for determining if the -// operation the token provider is decorating failed because of a 401 -// unauthorized status code. If the operation failed for that reason the token -// provider needs to be re-enabled so that it can start adding the API token to -// operation calls. -func (t *tokenProvider) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, input) - if err == nil { - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse) - } - - if resp.StatusCode == http.StatusUnauthorized { // unauthorized - t.enable() - err = &retryableError{Err: err, isRetryable: true} - } - - return out, metadata, err -} - -func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) { - if t.fallbackEnabled() && !t.enabled() { - return nil, &bypassTokenRetrievalError{ - Err: fmt.Errorf("cannot get API token, provider disabled"), - } - } - - t.tokenMux.RLock() - tok = t.token - t.tokenMux.RUnlock() - - if tok != nil && !tok.Expired() { - return tok, nil - } - - tok, err = t.updateToken(ctx) - if err != nil { - return nil, err - } - - return tok, nil -} - -func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { - t.tokenMux.Lock() - defer t.tokenMux.Unlock() - - // Prevent multiple requests to update retrieving the token. - if t.token != nil && !t.token.Expired() { - tok := t.token - return tok, nil - } - - result, err := t.client.getToken(ctx, &getTokenInput{ - TokenTTL: t.tokenTTL, - }) - if err != nil { - var statusErr interface{ HTTPStatusCode() int } - if errors.As(err, &statusErr) { - switch statusErr.HTTPStatusCode() { - // Disable future get token if failed because of 403, 404, or 405 - case http.StatusForbidden, - http.StatusNotFound, - http.StatusMethodNotAllowed: - - if t.fallbackEnabled() { - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err) - t.disable() - } - - // 400 errors are terminal, and need to be upstreamed - case http.StatusBadRequest: - return nil, err - } - } - - // Disable if request send failed or timed out getting response - var re *smithyhttp.RequestSendError - var ce *smithy.CanceledError - if errors.As(err, &re) || errors.As(err, &ce) { - atomic.StoreUint32(&t.disabled, 1) - } - - if !t.fallbackEnabled() { - // NOTE: getToken() is an implementation detail of some outer operation - // (e.g. GetMetadata). It has its own retries that have already been exhausted. - // Mark the underlying error as a terminal error. - err = &retryableError{Err: err, isRetryable: false} - return nil, err - } - - // Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request - // and allow the request to proceed. Future requests _may_ re-attempt fetching a - // token if not disabled. - return nil, &bypassTokenRetrievalError{Err: err} - } - - tok := &apiToken{ - token: result.Token, - expires: timeNow().Add(result.TokenTTL), - } - t.token = tok - - return tok, nil -} - -// enabled returns if the token provider is current enabled or not. -func (t *tokenProvider) enabled() bool { - return atomic.LoadUint32(&t.disabled) == 0 -} - -// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise -func (t *tokenProvider) fallbackEnabled() bool { - switch t.client.options.EnableFallback { - case aws.FalseTernary: - return false - default: - return true - } -} - -// disable disables the token provider and it will no longer attempt to inject -// the token, nor request updates. -func (t *tokenProvider) disable() { - atomic.StoreUint32(&t.disabled, 1) -} - -// enable enables the token provide to start refreshing tokens, and adding them -// to the pending request. -func (t *tokenProvider) enable() { - t.tokenMux.Lock() - t.token = nil - t.tokenMux.Unlock() - atomic.StoreUint32(&t.disabled, 0) -} - -type bypassTokenRetrievalError struct { - Err error -} - -func (e *bypassTokenRetrievalError) Error() string { - return fmt.Sprintf("bypass token retrieval, %v", e.Err) -} - -func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } - -type retryableError struct { - Err error - isRetryable bool -} - -func (e *retryableError) RetryableError() bool { return e.isRetryable } - -func (e *retryableError) Error() string { return e.Err.Error() } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md deleted file mode 100644 index 4600c083226b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ /dev/null @@ -1,768 +0,0 @@ -# v1.17.10 (2024-08-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2024-07-24) - -* **Documentation**: Clarify region hint and credential usage in HeadBucketRegion. - -# v1.17.8 (2024-07-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2024-07-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2024-07-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2024-06-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.25 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.24 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.23 (2024-06-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.22 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.21 (2024-05-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.20 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.19 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.18 (2024-05-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.17 (2024-05-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2024-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2024-04-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.14 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2024-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2024-03-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2024-03-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2024-02-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2024-02-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.15 (2024-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.14 (2024-01-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.13 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.12 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.11 (2024-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.9 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.8 (2023-12-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.7 (2023-12-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.6 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.5 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-11-28.3) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-11-28.2) - -* **Feature**: Add S3Express support. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.3 (2023-11-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-11-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-11-17) - -* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2023-11-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2023-11-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2023-11-09.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-11-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.92 (2023-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.91 (2023-10-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.90 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.89 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.88 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.87 (2023-09-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.86 (2023-09-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.85 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.84 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.83 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.82 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.81 (2023-08-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.80 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.79 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.78 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.77 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.76 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.75 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.74 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.73 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.72 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.71 (2023-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.70 (2023-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.69 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.68 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.67 (2023-05-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.66 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.65 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.64 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.63 (2023-04-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.62 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.61 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.60 (2023-03-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.59 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.58 (2023-03-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.57 (2023-03-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.56 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.55 (2023-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.54 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.53 (2023-02-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.52 (2023-02-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.51 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.50 (2023-02-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.49 (2023-01-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.48 (2023-01-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.47 (2023-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.46 (2022-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.45 (2022-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.44 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.43 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.42 (2022-11-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.41 (2022-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.40 (2022-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.39 (2022-11-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.38 (2022-11-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.37 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.36 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.35 (2022-10-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.34 (2022-09-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.33 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.32 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.31 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.30 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.29 (2022-08-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.28 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.27 (2022-08-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.26 (2022-08-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.25 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.24 (2022-08-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.23 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.22 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.21 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.20 (2022-07-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.19 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.18 (2022-07-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.17 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.16 (2022-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.15 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.14 (2022-05-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.13 (2022-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.12 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.11 (2022-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.10 (2022-05-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.9 (2022-05-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.8 (2022-05-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.7 (2022-04-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.6 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.5 (2022-04-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2022-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.1 (2022-01-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.5 (2021-12-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.4 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.3 (2021-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-11-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.4 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.3 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-09-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-06-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go deleted file mode 100644 index 4059f9851d73..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go +++ /dev/null @@ -1,37 +0,0 @@ -package manager - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/s3" -) - -// DeleteObjectsAPIClient is an S3 API client that can invoke the DeleteObjects operation. -type DeleteObjectsAPIClient interface { - DeleteObjects(context.Context, *s3.DeleteObjectsInput, ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error) -} - -// DownloadAPIClient is an S3 API client that can invoke the GetObject operation. -type DownloadAPIClient interface { - GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error) -} - -// HeadBucketAPIClient is an S3 API client that can invoke the HeadBucket operation. -type HeadBucketAPIClient interface { - HeadBucket(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error) -} - -// ListObjectsV2APIClient is an S3 API client that can invoke the ListObjectV2 operation. -type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *s3.ListObjectsV2Input, ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) -} - -// UploadAPIClient is an S3 API client that can invoke PutObject, UploadPart, CreateMultipartUpload, -// CompleteMultipartUpload, and AbortMultipartUpload operations. -type UploadAPIClient interface { - PutObject(context.Context, *s3.PutObjectInput, ...func(*s3.Options)) (*s3.PutObjectOutput, error) - UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error) - CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error) - CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error) - AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go deleted file mode 100644 index d3b828979fcb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go +++ /dev/null @@ -1,23 +0,0 @@ -package manager - -import ( - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -func validateSupportedARNType(bucket string) error { - if !arn.IsARN(bucket) { - return nil - } - - parsedARN, err := arn.Parse(bucket) - if err != nil { - return err - } - - if parsedARN.Service == "s3-object-lambda" { - return fmt.Errorf("manager does not support s3-object-lambda service ARNs") - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go deleted file mode 100644 index 8c7019529498..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go +++ /dev/null @@ -1,147 +0,0 @@ -package manager - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const bucketRegionHeader = "X-Amz-Bucket-Region" - -// GetBucketRegion will attempt to get the region for a bucket using the -// client's configured region to determine which AWS partition to perform the query on. -// -// A BucketNotFound error will be returned if the bucket does not exist in the -// AWS partition the client region belongs to. -// -// For example to get the region of a bucket which exists in "eu-central-1" -// you could provide a region hint of "us-west-2". -// -// cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2")) -// if err != nil { -// log.Println("error:", err) -// return -// } -// -// bucket := "my-bucket" -// region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket) -// if err != nil { -// var bnf manager.BucketNotFound -// if errors.As(err, &bnf) { -// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket) -// } -// return -// } -// fmt.Printf("Bucket %s is in %s region\n", bucket, region) -// -// By default the request will be made to the Amazon S3 endpoint using the virtual-hosted-style addressing. -// -// bucketname.s3.us-west-2.amazonaws.com/ -// -// To configure the GetBucketRegion to make a request via the Amazon -// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g. -// fips-us-gov-west-1) set the EndpointResolver on the config or client the -// utility is called with. -// -// cfg, err := config.LoadDefaultConfig(context.TODO(), -// config.WithEndpointResolver( -// aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { -// return aws.Endpoint{URL: "https://s3-fips.us-west-2.amazonaws.com"}, nil -// }), -// ) -// if err != nil { -// panic(err) -// } -// -// If buckets are public, you may use anonymous credential like so. -// -// manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket, func(o *s3.Options) { -// o.Credentials = nil -// // Or -// o.Credentials = aws.AnonymousCredentials{} -// }) -// -// The request with anonymous credentials will not be signed. -// Otherwise credentials would be required for private buckets. -func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) { - var captureBucketRegion deserializeBucketRegion - - clientOptionFns := make([]func(*s3.Options), len(optFns)+1) - clientOptionFns[0] = func(options *s3.Options) { - options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware) - } - copy(clientOptionFns[1:], optFns) - - _, err := client.HeadBucket(ctx, &s3.HeadBucketInput{ - Bucket: aws.String(bucket), - }, clientOptionFns...) - if len(captureBucketRegion.BucketRegion) == 0 && err != nil { - var httpStatusErr interface { - HTTPStatusCode() int - } - if !errors.As(err, &httpStatusErr) { - return "", err - } - - if httpStatusErr.HTTPStatusCode() == http.StatusNotFound { - return "", &bucketNotFound{} - } - - return "", err - } - - return captureBucketRegion.BucketRegion, nil -} - -type deserializeBucketRegion struct { - BucketRegion string -} - -func (d *deserializeBucketRegion) RegisterMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Add(d, middleware.After) -} - -func (d *deserializeBucketRegion) ID() string { - return "DeserializeBucketRegion" -} - -func (d *deserializeBucketRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) - } - - d.BucketRegion = resp.Header.Get(bucketRegionHeader) - - return out, metadata, err -} - -// BucketNotFound indicates the bucket was not found in the partition when calling GetBucketRegion. -type BucketNotFound interface { - error - - isBucketNotFound() -} - -type bucketNotFound struct{} - -func (b *bucketNotFound) Error() string { - return "bucket not found" -} - -func (b *bucketNotFound) isBucketNotFound() {} - -var _ BucketNotFound = (*bucketNotFound)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go deleted file mode 100644 index e781aef610d4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go +++ /dev/null @@ -1,79 +0,0 @@ -package manager - -import ( - "io" -) - -// BufferedReadSeeker is buffered io.ReadSeeker -type BufferedReadSeeker struct { - r io.ReadSeeker - buffer []byte - readIdx, writeIdx int -} - -// NewBufferedReadSeeker returns a new BufferedReadSeeker -// if len(b) == 0 then the buffer will be initialized to 64 KiB. -func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker { - if len(b) == 0 { - b = make([]byte, 64*1024) - } - return &BufferedReadSeeker{r: r, buffer: b} -} - -func (b *BufferedReadSeeker) reset(r io.ReadSeeker) { - b.r = r - b.readIdx, b.writeIdx = 0, 0 -} - -// Read will read up len(p) bytes into p and will return -// the number of bytes read and any error that occurred. -// If the len(p) > the buffer size then a single read request -// will be issued to the underlying io.ReadSeeker for len(p) bytes. -// A Read request will at most perform a single Read to the underlying -// io.ReadSeeker, and may return < len(p) if serviced from the buffer. -func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return n, err - } - - if b.readIdx == b.writeIdx { - if len(p) >= len(b.buffer) { - n, err = b.r.Read(p) - return n, err - } - b.readIdx, b.writeIdx = 0, 0 - - n, err = b.r.Read(b.buffer) - if n == 0 { - return n, err - } - - b.writeIdx += n - } - - n = copy(p, b.buffer[b.readIdx:b.writeIdx]) - b.readIdx += n - - return n, err -} - -// Seek will position then underlying io.ReadSeeker to the given offset -// and will clear the buffer. -func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { - n, err := b.r.Seek(offset, whence) - - b.reset(b.r) - - return n, err -} - -// ReadAt will read up to len(p) bytes at the given file offset. -// This will result in the buffer being cleared. -func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { - _, err := b.Seek(off, io.SeekStart) - if err != nil { - return 0, err - } - - return b.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go deleted file mode 100644 index e2ab143b6c0a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows -// +build !windows - -package manager - -func defaultUploadBufferProvider() ReadSeekerWriteToProvider { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go deleted file mode 100644 index 1ae881c104aa..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package manager - -func defaultUploadBufferProvider() ReadSeekerWriteToProvider { - return NewBufferedReadSeekerWriteToPool(1024 * 1024) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go deleted file mode 100644 index 179fe10f4035..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows -// +build !windows - -package manager - -func defaultDownloadBufferProvider() WriterReadFromProvider { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go deleted file mode 100644 index 88887ff586e7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package manager - -func defaultDownloadBufferProvider() WriterReadFromProvider { - return NewPooledBufferedWriterReadFromProvider(1024 * 1024) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go deleted file mode 100644 index 31171a69875a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package manager provides utilities to upload and download objects from -// S3 concurrently. Helpful for when working with large objects. -package manager diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go deleted file mode 100644 index 5a9fe2dd34c0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go +++ /dev/null @@ -1,528 +0,0 @@ -package manager - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "sync" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/awsutil" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/smithy-go/logging" -) - -const userAgentKey = "s3-transfer" - -// DefaultDownloadPartSize is the default range of bytes to get at a time when -// using Download(). -const DefaultDownloadPartSize = 1024 * 1024 * 5 - -// DefaultDownloadConcurrency is the default number of goroutines to spin up -// when using Download(). -const DefaultDownloadConcurrency = 5 - -// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download. -const DefaultPartBodyMaxRetries = 3 - -type errReadingBody struct { - err error -} - -func (e *errReadingBody) Error() string { - return fmt.Sprintf("failed to read part body: %v", e.err) -} - -func (e *errReadingBody) Unwrap() error { - return e.err -} - -// The Downloader structure that calls Download(). It is safe to call Download() -// on this structure for multiple objects and across concurrent goroutines. -// Mutating the Downloader's properties is not safe to be done concurrently. -type Downloader struct { - // The size (in bytes) to request from S3 for each part. - // The minimum allowed part size is 5MB, and if this value is set to zero, - // the DefaultDownloadPartSize value will be used. - // - // PartSize is ignored if the Range input parameter is provided. - PartSize int64 - - // PartBodyMaxRetries is the number of retry attempts to make for failed part downloads. - PartBodyMaxRetries int - - // Logger to send logging messages to - Logger logging.Logger - - // Enable Logging of part download retry attempts - LogInterruptedDownloads bool - - // The number of goroutines to spin up in parallel when sending parts. - // If this is set to zero, the DefaultDownloadConcurrency value will be used. - // - // Concurrency of 1 will download the parts sequentially. - // - // Concurrency is ignored if the Range input parameter is provided. - Concurrency int - - // An S3 client to use when performing downloads. - S3 DownloadAPIClient - - // List of client options that will be passed down to individual API - // operation requests made by the downloader. - ClientOptions []func(*s3.Options) - - // Defines the buffer strategy used when downloading a part. - // - // If a WriterReadFromProvider is given the Download manager - // will pass the io.WriterAt of the Download request to the provider - // and will use the returned WriterReadFrom from the provider as the - // destination writer when copying from http response body. - BufferProvider WriterReadFromProvider -} - -// WithDownloaderClientOptions appends to the Downloader's API request options. -func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) { - return func(d *Downloader) { - d.ClientOptions = append(d.ClientOptions, opts...) - } -} - -// NewDownloader creates a new Downloader instance to downloads objects from -// S3 in concurrent chunks. Pass in additional functional options to customize -// the downloader behavior. Requires a client.ConfigProvider in order to create -// a S3 service client. The session.Session satisfies the client.ConfigProvider -// interface. -// -// Example: -// -// // Load AWS Config -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create an S3 client using the loaded configuration -// s3.NewFromConfig(cfg) -// -// // Create a downloader passing it the S3 client -// downloader := manager.NewDownloader(s3.NewFromConfig(cfg)) -// -// // Create a downloader with the client and custom downloader options -// downloader := manager.NewDownloader(client, func(d *manager.Downloader) { -// d.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloader { - d := &Downloader{ - S3: c, - PartSize: DefaultDownloadPartSize, - PartBodyMaxRetries: DefaultPartBodyMaxRetries, - Concurrency: DefaultDownloadConcurrency, - BufferProvider: defaultDownloadBufferProvider(), - } - for _, option := range options { - option(d) - } - - return d -} - -// Download downloads an object in S3 and writes the payload into w -// using concurrent GET requests. The n int64 returned is the size of the object downloaded -// in bytes. -// -// DownloadWithContext is the same as Download with the additional support for -// Context input parameters. The Context must not be nil. A nil Context will -// cause a panic. Use the Context to add deadlining, timeouts, etc. The -// DownloadWithContext may create sub-contexts for individual underlying -// requests. -// -// Additional functional options can be provided to configure the individual -// download. These options are copies of the Downloader instance Download is -// called from. Modifying the options will not impact the original Downloader -// instance. Use the WithDownloaderClientOptions helper function to pass in request -// options that will be applied to all API operations made with this downloader. -// -// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent -// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. In case you download -// files into memory do not forget to pre-allocate memory to avoid additional allocations -// and GC runs. -// -// Example: -// -// // pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput -// buf := make([]byte, int(headObject.ContentLength)) -// // wrap with aws.WriteAtBuffer -// w := s3manager.NewWriteAtBuffer(buf) -// // download file into the memory -// numBytesDownloaded, err := downloader.Download(ctx, w, &s3.GetObjectInput{ -// Bucket: aws.String(bucket), -// Key: aws.String(item), -// }) -// -// Specifying a Downloader.Concurrency of 1 will cause the Downloader to -// download the parts from S3 sequentially. -// -// It is safe to call this method concurrently across goroutines. -// -// If the GetObjectInput's Range value is provided that will cause the downloader -// to perform a single GetObjectInput request for that object's range. This will -// caused the part size, and concurrency configurations to be ignored. -func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { - if err := validateSupportedARNType(aws.ToString(input.Bucket)); err != nil { - return 0, err - } - - impl := downloader{w: w, in: input, cfg: d, ctx: ctx} - - // Copy ClientOptions - clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1) - clientOptions = append(clientOptions, func(o *s3.Options) { - o.APIOptions = append(o.APIOptions, - middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), - addFeatureUserAgent, // yes, there are two of these - ) - }) - clientOptions = append(clientOptions, impl.cfg.ClientOptions...) - impl.cfg.ClientOptions = clientOptions - - for _, option := range options { - option(&impl.cfg) - } - - // Ensures we don't need nil checks later on - impl.cfg.Logger = logging.WithContext(ctx, impl.cfg.Logger) - - impl.partBodyMaxRetries = d.PartBodyMaxRetries - - impl.totalBytes = -1 - if impl.cfg.Concurrency == 0 { - impl.cfg.Concurrency = DefaultDownloadConcurrency - } - - if impl.cfg.PartSize == 0 { - impl.cfg.PartSize = DefaultDownloadPartSize - } - - return impl.download() -} - -// downloader is the implementation structure used internally by Downloader. -type downloader struct { - ctx context.Context - cfg Downloader - - in *s3.GetObjectInput - w io.WriterAt - - wg sync.WaitGroup - m sync.Mutex - - pos int64 - totalBytes int64 - written int64 - err error - - partBodyMaxRetries int -} - -// download performs the implementation of the object download across ranged -// GETs. -func (d *downloader) download() (n int64, err error) { - // If range is specified fall back to single download of that range - // this enables the functionality of ranged gets with the downloader but - // at the cost of no multipart downloads. - if rng := aws.ToString(d.in.Range); len(rng) > 0 { - d.downloadRange(rng) - return d.written, d.err - } - - // Spin off first worker to check additional header information - d.getChunk() - - if total := d.getTotalBytes(); total >= 0 { - // Spin up workers - ch := make(chan dlchunk, d.cfg.Concurrency) - - for i := 0; i < d.cfg.Concurrency; i++ { - d.wg.Add(1) - go d.downloadPart(ch) - } - - // Assign work - for d.getErr() == nil { - if d.pos >= total { - break // We're finished queuing chunks - } - - // Queue the next range of bytes to read. - ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} - d.pos += d.cfg.PartSize - } - - // Wait for completion - close(ch) - d.wg.Wait() - } else { - // Checking if we read anything new - for d.err == nil { - d.getChunk() - } - - // We expect a 416 error letting us know we are done downloading the - // total bytes. Since we do not know the content's length, this will - // keep grabbing chunks of data until the range of bytes specified in - // the request is out of range of the content. Once, this happens, a - // 416 should occur. - var responseError interface { - HTTPStatusCode() int - } - if errors.As(d.err, &responseError) { - if responseError.HTTPStatusCode() == http.StatusRequestedRangeNotSatisfiable { - d.err = nil - } - } - } - - // Return error - return d.written, d.err -} - -// downloadPart is an individual goroutine worker reading from the ch channel -// and performing a GetObject request on the data with a given byte range. -// -// If this is the first worker, this operation also resolves the total number -// of bytes to be read so that the worker manager knows when it is finished. -func (d *downloader) downloadPart(ch chan dlchunk) { - defer d.wg.Done() - for { - chunk, ok := <-ch - if !ok { - break - } - if d.getErr() != nil { - // Drain the channel if there is an error, to prevent deadlocking - // of download producer. - continue - } - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } - } -} - -// getChunk grabs a chunk of data from the body. -// Not thread safe. Should only used when grabbing data on a single thread. -func (d *downloader) getChunk() { - if d.getErr() != nil { - return - } - - chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} - d.pos += d.cfg.PartSize - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } -} - -// downloadRange downloads an Object given the passed in Byte-Range value. -// The chunk used down download the range will be configured for that range. -func (d *downloader) downloadRange(rng string) { - if d.getErr() != nil { - return - } - - chunk := dlchunk{w: d.w, start: d.pos} - // Ranges specified will short circuit the multipart download - chunk.withRange = rng - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } - - // Update the position based on the amount of data received. - d.pos = d.written -} - -// downloadChunk downloads the chunk from s3 -func (d *downloader) downloadChunk(chunk dlchunk) error { - var params s3.GetObjectInput - awsutil.Copy(¶ms, d.in) - - // Get the next byte range of data - params.Range = aws.String(chunk.ByteRange()) - - var n int64 - var err error - for retry := 0; retry <= d.partBodyMaxRetries; retry++ { - n, err = d.tryDownloadChunk(¶ms, &chunk) - if err == nil { - break - } - // Check if the returned error is an errReadingBody. - // If err is errReadingBody this indicates that an error - // occurred while copying the http response body. - // If this occurs we unwrap the err to set the underlying error - // and attempt any remaining retries. - if bodyErr, ok := err.(*errReadingBody); ok { - err = bodyErr.Unwrap() - } else { - return err - } - - chunk.cur = 0 - - d.cfg.Logger.Logf(logging.Debug, - "object part body download interrupted %s, err, %v, retrying attempt %d", - aws.ToString(params.Key), err, retry) - } - - d.incrWritten(n) - - return err -} - -func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (int64, error) { - cleanup := func() {} - if d.cfg.BufferProvider != nil { - w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) - } - defer cleanup() - - resp, err := d.cfg.S3.GetObject(d.ctx, params, d.cfg.ClientOptions...) - if err != nil { - return 0, err - } - d.setTotalBytes(resp) // Set total if not yet set. - - var src io.Reader = resp.Body - if d.cfg.BufferProvider != nil { - src = &suppressWriterAt{suppressed: src} - } - n, err := io.Copy(w, src) - resp.Body.Close() - if err != nil { - return n, &errReadingBody{err: err} - } - - return n, nil -} - -// getTotalBytes is a thread-safe getter for retrieving the total byte status. -func (d *downloader) getTotalBytes() int64 { - d.m.Lock() - defer d.m.Unlock() - - return d.totalBytes -} - -// setTotalBytes is a thread-safe setter for setting the total byte status. -// Will extract the object's total bytes from the Content-Range if the file -// will be chunked, or Content-Length. Content-Length is used when the response -// does not include a Content-Range. Meaning the object was not chunked. This -// occurs when the full file fits within the PartSize directive. -func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { - d.m.Lock() - defer d.m.Unlock() - - if d.totalBytes >= 0 { - return - } - - if resp.ContentRange == nil { - // ContentRange is nil when the full file contents is provided, and - // is not chunked. Use ContentLength instead. - if aws.ToInt64(resp.ContentLength) > 0 { - d.totalBytes = aws.ToInt64(resp.ContentLength) - return - } - } else { - parts := strings.Split(*resp.ContentRange, "/") - - total := int64(-1) - var err error - // Checking for whether or not a numbered total exists - // If one does not exist, we will assume the total to be -1, undefined, - // and sequentially download each chunk until hitting a 416 error - totalStr := parts[len(parts)-1] - if totalStr != "*" { - total, err = strconv.ParseInt(totalStr, 10, 64) - if err != nil { - d.err = err - return - } - } - - d.totalBytes = total - } -} - -func (d *downloader) incrWritten(n int64) { - d.m.Lock() - defer d.m.Unlock() - - d.written += n -} - -// getErr is a thread-safe getter for the error object -func (d *downloader) getErr() error { - d.m.Lock() - defer d.m.Unlock() - - return d.err -} - -// setErr is a thread-safe setter for the error object -func (d *downloader) setErr(e error) { - d.m.Lock() - defer d.m.Unlock() - - d.err = e -} - -// dlchunk represents a single chunk of data to write by the worker routine. -// This structure also implements an io.SectionReader style interface for -// io.WriterAt, effectively making it an io.SectionWriter (which does not -// exist). -type dlchunk struct { - w io.WriterAt - start int64 - size int64 - cur int64 - - // specifies the byte range the chunk should be downloaded with. - withRange string -} - -// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start -// position to its end (or EOF). -// -// If a range is specified on the dlchunk the size will be ignored when writing. -// as the total size may not of be known ahead of time. -func (c *dlchunk) Write(p []byte) (n int, err error) { - if c.cur >= c.size && len(c.withRange) == 0 { - return 0, io.EOF - } - - n, err = c.w.WriteAt(p, c.start+c.cur) - c.cur += int64(n) - - return -} - -// ByteRange returns a HTTP Byte-Range header value that should be used by the -// client to request the chunk's range. -func (c *dlchunk) ByteRange() string { - if len(c.withRange) != 0 { - return c.withRange - } - - return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go deleted file mode 100644 index fdb1a0fb3cc0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package manager - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go deleted file mode 100644 index 6b93a3bc443a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go +++ /dev/null @@ -1,251 +0,0 @@ -package manager - -import ( - "context" - "fmt" - "sync" -) - -type byteSlicePool interface { - Get(context.Context) (*[]byte, error) - Put(*[]byte) - ModifyCapacity(int) - SliceSize() int64 - Close() -} - -type maxSlicePool struct { - // allocator is defined as a function pointer to allow - // for test cases to instrument custom tracers when allocations - // occur. - allocator sliceAllocator - - slices chan *[]byte - allocations chan struct{} - capacityChange chan struct{} - - max int - sliceSize int64 - - mtx sync.RWMutex -} - -func newMaxSlicePool(sliceSize int64) *maxSlicePool { - p := &maxSlicePool{sliceSize: sliceSize} - p.allocator = p.newSlice - - return p -} - -var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") - -func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) { - // check if context is canceled before attempting to get a slice - // this ensures priority is given to the cancel case first - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - p.mtx.RLock() - - for { - select { - case bs, ok := <-p.slices: - p.mtx.RUnlock() - if !ok { - // attempt to get on a zero capacity pool - return nil, errZeroCapacity - } - return bs, nil - case <-ctx.Done(): - p.mtx.RUnlock() - return nil, ctx.Err() - default: - // pass - } - - select { - case _, ok := <-p.allocations: - p.mtx.RUnlock() - if !ok { - // attempt to get on a zero capacity pool - return nil, errZeroCapacity - } - return p.allocator(), nil - case <-ctx.Done(): - p.mtx.RUnlock() - return nil, ctx.Err() - default: - // In the event that there are no slices or allocations available - // This prevents some deadlock situations that can occur around sync.RWMutex - // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. - // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where - // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, - // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. - - // Short-circuit if the pool capacity is zero. - if p.max == 0 { - p.mtx.RUnlock() - return nil, errZeroCapacity - } - - // Since we will be releasing the read-lock we need to take the reference to the channel. - // Since channels are references we will still get notified if slices are added, or if - // the channel is closed due to a capacity modification. This specifically avoids a data race condition - // where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock. - c := p.capacityChange - - p.mtx.RUnlock() - - select { - case _ = <-c: - p.mtx.RLock() - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } -} - -func (p *maxSlicePool) Put(bs *[]byte) { - p.mtx.RLock() - defer p.mtx.RUnlock() - - if p.max == 0 { - return - } - - select { - case p.slices <- bs: - p.notifyCapacity() - default: - // If the new channel when attempting to add the slice then we drop the slice. - // The logic here is to prevent a deadlock situation if channel is already at max capacity. - // Allows us to reap allocations that are returned and are no longer needed. - } -} - -func (p *maxSlicePool) ModifyCapacity(delta int) { - if delta == 0 { - return - } - - p.mtx.Lock() - defer p.mtx.Unlock() - - p.max += delta - - if p.max == 0 { - p.empty() - return - } - - if p.capacityChange != nil { - close(p.capacityChange) - } - p.capacityChange = make(chan struct{}, p.max) - - origAllocations := p.allocations - p.allocations = make(chan struct{}, p.max) - - newAllocs := len(origAllocations) + delta - for i := 0; i < newAllocs; i++ { - p.allocations <- struct{}{} - } - - if origAllocations != nil { - close(origAllocations) - } - - origSlices := p.slices - p.slices = make(chan *[]byte, p.max) - if origSlices == nil { - return - } - - close(origSlices) - for bs := range origSlices { - select { - case p.slices <- bs: - default: - // If the new channel blocks while adding slices from the old channel - // then we drop the slice. The logic here is to prevent a deadlock situation - // if the new channel has a smaller capacity then the old. - } - } -} - -func (p *maxSlicePool) notifyCapacity() { - select { - case p.capacityChange <- struct{}{}: - default: - // This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized - // on capacity modifications. This is just a safety to ensure that a blocking situation can't occur. - } -} - -func (p *maxSlicePool) SliceSize() int64 { - return p.sliceSize -} - -func (p *maxSlicePool) Close() { - p.mtx.Lock() - defer p.mtx.Unlock() - p.empty() -} - -func (p *maxSlicePool) empty() { - p.max = 0 - - if p.capacityChange != nil { - close(p.capacityChange) - p.capacityChange = nil - } - - if p.allocations != nil { - close(p.allocations) - for range p.allocations { - // drain channel - } - p.allocations = nil - } - - if p.slices != nil { - close(p.slices) - for range p.slices { - // drain channel - } - p.slices = nil - } -} - -func (p *maxSlicePool) newSlice() *[]byte { - bs := make([]byte, p.sliceSize) - return &bs -} - -type returnCapacityPoolCloser struct { - byteSlicePool - returnCapacity int -} - -func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { - if delta > 0 { - n.returnCapacity = -1 * delta - } - n.byteSlicePool.ModifyCapacity(delta) -} - -func (n *returnCapacityPoolCloser) Close() { - if n.returnCapacity < 0 { - n.byteSlicePool.ModifyCapacity(n.returnCapacity) - } -} - -type sliceAllocator func() *[]byte - -var newByteSlicePool = func(sliceSize int64) byteSlicePool { - return newMaxSlicePool(sliceSize) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go deleted file mode 100644 index ce117c32a130..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go +++ /dev/null @@ -1,65 +0,0 @@ -package manager - -import ( - "io" - "sync" -) - -// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker -type ReadSeekerWriteTo interface { - io.ReadSeeker - io.WriterTo -} - -// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt -// implementation. -type BufferedReadSeekerWriteTo struct { - *BufferedReadSeeker -} - -// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or -// an error occurs. Returns the number of bytes written and any error encountered during the write. -func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) { - return io.Copy(writer, b.BufferedReadSeeker) -} - -// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker -type ReadSeekerWriteToProvider interface { - GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) -} - -// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse -// []byte slices for buffering parts in memory -type BufferedReadSeekerWriteToPool struct { - pool sync.Pool -} - -// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create -// a pool of reusable buffers . If size is less then < 64 KiB then the buffer -// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom -// respectively will default to copying 32 KiB. -func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool { - if size < 65536 { - size = 65536 - } - - return &BufferedReadSeekerWriteToPool{ - pool: sync.Pool{New: func() interface{} { - return make([]byte, size) - }}, - } -} - -// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo. -// The provided cleanup must be called after operations have been completed on the -// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool. -func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) { - buffer := p.pool.Get().([]byte) - - r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)} - cleanup = func() { - p.pool.Put(buffer) - } - - return r, cleanup -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go deleted file mode 100644 index 968f907327fb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go +++ /dev/null @@ -1,187 +0,0 @@ -package manager - -import ( - "io" - "sync" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. -// -// A readSeekCloser wrapping an nonseekable io.Reader used in an API operation's -// input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if yhe operation -// requires payload signing. -// -// Note: If using with S3 PutObject to stream an object upload. The SDK's S3 -// Upload Manager(s3manager.Uploader) provides support for streaming -// with the ability to retry network errors. -func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser { - return &ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// seekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func seekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. - switch v := s.(type) { - case *ReaderSeekerCloser: - return v.GetLen() - } - - return computeSeekerLength(s) -} - -// GetLen returns the length of the bytes remaining in the underlying reader. -// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r *ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return computeSeekerLength(s) - } - - return -1, nil -} - -func computeSeekerLength(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, io.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, io.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, io.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r *ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. -// -// Performs the same functionality as io.Reader Read -func (r *ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r *ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r *ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r *ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go deleted file mode 100644 index d73a6d3b9135..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go +++ /dev/null @@ -1,884 +0,0 @@ -package manager - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "sort" - "sync" - - "github.com/aws/aws-sdk-go-v2/aws" - - "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/awsutil" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// MaxUploadParts is the maximum allowed number of parts in a multi-part upload -// on Amazon S3. -const MaxUploadParts int32 = 10000 - -// MinUploadPartSize is the minimum allowed part size when uploading a part to -// Amazon S3. -const MinUploadPartSize int64 = 1024 * 1024 * 5 - -// DefaultUploadPartSize is the default part size to buffer chunks of a -// payload into. -const DefaultUploadPartSize = MinUploadPartSize - -// DefaultUploadConcurrency is the default number of goroutines to spin up when -// using Upload(). -const DefaultUploadConcurrency = 5 - -// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned -// will satisfy this interface when a multi part upload failed to upload all -// chucks to S3. In the case of a failure the UploadID is needed to operate on -// the chunks, if any, which were uploaded. -// -// Example: -// -// u := manager.NewUploader(client) -// output, err := u.upload(context.Background(), input) -// if err != nil { -// var multierr manager.MultiUploadFailure -// if errors.As(err, &multierr) { -// fmt.Printf("upload failure UploadID=%s, %s\n", multierr.UploadID(), multierr.Error()) -// } else { -// fmt.Printf("upload failure, %s\n", err.Error()) -// } -// } -type MultiUploadFailure interface { - error - - // UploadID returns the upload id for the S3 multipart upload that failed. - UploadID() string -} - -// A multiUploadError wraps the upload ID of a failed s3 multipart upload. -// Composed of BaseError for code, message, and original error -// -// Should be used for an error that occurred failing a S3 multipart upload, -// and a upload ID is available. If an uploadID is not available a more relevant -type multiUploadError struct { - err error - - // ID for multipart upload which failed. - uploadID string -} - -// batchItemError returns the string representation of the error. -// -// # See apierr.BaseError ErrorWithExtra for output format -// -// Satisfies the error interface. -func (m *multiUploadError) Error() string { - var extra string - if m.err != nil { - extra = fmt.Sprintf(", cause: %s", m.err.Error()) - } - return fmt.Sprintf("upload multipart failed, upload id: %s%s", m.uploadID, extra) -} - -// Unwrap returns the underlying error that cause the upload failure -func (m *multiUploadError) Unwrap() error { - return m.err -} - -// UploadID returns the id of the S3 upload which failed. -func (m *multiUploadError) UploadID() string { - return m.uploadID -} - -// UploadOutput represents a response from the Upload() call. -type UploadOutput struct { - // The URL where the object was uploaded to. - Location string - - // The ID for a multipart upload to S3. In the case of an error the error - // can be cast to the MultiUploadFailure interface to extract the upload ID. - // Will be empty string if multipart upload was not used, and the object - // was uploaded as a single PutObject call. - UploadID string - - // The list of parts that were uploaded and their checksums. Will be empty - // if multipart upload was not used, and the object was uploaded as a - // single PutObject call. - CompletedParts []types.CompletedPart - - // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool - - // The base64-encoded, 32-bit CRC32 checksum of the object. - ChecksumCRC32 *string - - // The base64-encoded, 32-bit CRC32C checksum of the object. - ChecksumCRC32C *string - - // The base64-encoded, 160-bit SHA-1 digest of the object. - ChecksumSHA1 *string - - // The base64-encoded, 256-bit SHA-256 digest of the object. - ChecksumSHA256 *string - - // Entity tag for the uploaded object. - ETag *string - - // If the object expiration is configured, this will contain the expiration date - // (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string - - // The object key of the newly created object. - Key *string - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged types.RequestCharged - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. - SSEKMSKeyId *string - - // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS customer master key (CMK) in your - // initiate multipart upload request, the response includes this header. It - // confirms the encryption algorithm that Amazon S3 used to encrypt the object. - ServerSideEncryption types.ServerSideEncryption - - // The version of the object that was uploaded. Will only be populated if - // the S3 Bucket is versioned. If the bucket is not versioned this field - // will not be set. - VersionID *string -} - -// WithUploaderRequestOptions appends to the Uploader's API client options. -func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) { - return func(u *Uploader) { - u.ClientOptions = append(u.ClientOptions, opts...) - } -} - -// The Uploader structure that calls Upload(). It is safe to call Upload() -// on this structure for multiple objects and across concurrent goroutines. -// Mutating the Uploader's properties is not safe to be done concurrently. -// -// # Pre-computed Checksums -// -// Care must be taken when using pre-computed checksums the transfer upload -// manager. The format and value of the checksum differs based on if the upload -// will preformed as a single or multipart upload. -// -// Uploads that are smaller than the Uploader's PartSize will be uploaded using -// the PutObject API operation. Pre-computed checksum of the uploaded object's -// content are valid for these single part uploads. If the checksum provided -// does not match the uploaded content the upload will fail. -// -// Uploads that are larger than the Uploader's PartSize will be uploaded using -// multi-part upload. The Pre-computed checksums for these uploads are a -// checksum of checksums of each part. Not a checksum of the full uploaded -// bytes. With the format of "-", (e.g. -// "DUoRhQ==-3"). If a pre-computed checksum is provided that does not match -// this format, as matches the content uploaded, the upload will fail. -// -// ContentMD5 for multipart upload is explicitly ignored for multipart upload, -// and its value is suppressed. -// -// # Automatically Computed Checksums -// -// When the ChecksumAlgorithm member of Upload's input parameter PutObjectInput -// is set to a valid value, the SDK will automatically compute the checksum of -// the individual uploaded parts. The UploadOutput result from Upload will -// include the checksum of part checksums provided by S3 -// CompleteMultipartUpload API call. -type Uploader struct { - // The buffer size (in bytes) to use when buffering data into chunks and - // sending them as parts to S3. The minimum allowed part size is 5MB, and - // if this value is set to zero, the DefaultUploadPartSize value will be used. - PartSize int64 - - // The number of goroutines to spin up in parallel per call to Upload when - // sending parts. If this is set to zero, the DefaultUploadConcurrency value - // will be used. - // - // The concurrency pool is not shared between calls to Upload. - Concurrency int - - // Setting this value to true will cause the SDK to avoid calling - // AbortMultipartUpload on a failure, leaving all successfully uploaded - // parts on S3 for manual recovery. - // - // Note that storing parts of an incomplete multipart upload counts towards - // space usage on S3 and will add additional costs if not cleaned up. - LeavePartsOnError bool - - // MaxUploadParts is the max number of parts which will be uploaded to S3. - // Will be used to calculate the partsize of the object to be uploaded. - // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file - // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). - // - // MaxUploadParts must not be used to limit the total number of bytes uploaded. - // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) - // instead. An io.LimitReader is helpful when uploading an unbounded reader - // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned - // error must be used to signal end of stream. - // - // Defaults to package const's MaxUploadParts value. - MaxUploadParts int32 - - // The client to use when uploading to S3. - S3 UploadAPIClient - - // List of request options that will be passed down to individual API - // operation requests made by the uploader. - ClientOptions []func(*s3.Options) - - // Defines the buffer strategy used when uploading a part - BufferProvider ReadSeekerWriteToProvider - - // partPool allows for the re-usage of streaming payload part buffers between upload calls - partPool byteSlicePool -} - -// NewUploader creates a new Uploader instance to upload objects to S3. Pass In -// additional functional options to customize the uploader's behavior. Requires a -// client.ConfigProvider in order to create a S3 service client. The session.Session -// satisfies the client.ConfigProvider interface. -// -// Example: -// -// // Load AWS Config -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create an S3 Client with the config -// client := s3.NewFromConfig(cfg) -// -// // Create an uploader passing it the client -// uploader := manager.NewUploader(client) -// -// // Create an uploader with the client and custom options -// uploader := manager.NewUploader(client, func(u *manager.Uploader) { -// u.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader { - u := &Uploader{ - S3: client, - PartSize: DefaultUploadPartSize, - Concurrency: DefaultUploadConcurrency, - LeavePartsOnError: false, - MaxUploadParts: MaxUploadParts, - BufferProvider: defaultUploadBufferProvider(), - } - - for _, option := range options { - option(u) - } - - u.partPool = newByteSlicePool(u.PartSize) - - return u -} - -// Upload uploads an object to S3, intelligently buffering large -// files into smaller chunks and sending them in parallel across multiple -// goroutines. You can configure the buffer size and concurrency through the -// Uploader parameters. -// -// Additional functional options can be provided to configure the individual -// upload. These options are copies of the Uploader instance Upload is called from. -// Modifying the options will not impact the original Uploader instance. -// -// Use the WithUploaderRequestOptions helper function to pass in request -// options that will be applied to all API operations made with this uploader. -// -// It is safe to call this method concurrently across goroutines. -func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) ( - *UploadOutput, error, -) { - i := uploader{in: input, cfg: u, ctx: ctx} - - // Copy ClientOptions - clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1) - clientOptions = append(clientOptions, func(o *s3.Options) { - o.APIOptions = append(o.APIOptions, - middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), - addFeatureUserAgent, // yes, there are two of these - func(s *smithymiddleware.Stack) error { - return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After) - }, - ) - }) - clientOptions = append(clientOptions, i.cfg.ClientOptions...) - i.cfg.ClientOptions = clientOptions - - for _, opt := range opts { - opt(&i.cfg) - } - - return i.upload() -} - -// internal structure to manage an upload to S3. -type uploader struct { - ctx context.Context - cfg Uploader - - in *s3.PutObjectInput - - readerPos int64 // current reader position - totalSize int64 // set to -1 if the size is not known -} - -// internal logic for deciding whether to upload a single part or use a -// multipart upload. -func (u *uploader) upload() (*UploadOutput, error) { - if err := u.init(); err != nil { - return nil, fmt.Errorf("unable to initialize upload: %w", err) - } - defer u.cfg.partPool.Close() - - if u.cfg.PartSize < MinUploadPartSize { - return nil, fmt.Errorf("part size must be at least %d bytes", MinUploadPartSize) - } - - // Do one read to determine if we have more than one part - reader, _, cleanup, err := u.nextReader() - if err == io.EOF { // single part - return u.singlePart(reader, cleanup) - } else if err != nil { - cleanup() - return nil, fmt.Errorf("read upload data failed: %w", err) - } - - mu := multiuploader{uploader: u} - return mu.upload(reader, cleanup) -} - -// init will initialize all default options. -func (u *uploader) init() error { - if err := validateSupportedARNType(aws.ToString(u.in.Bucket)); err != nil { - return err - } - - if u.cfg.Concurrency == 0 { - u.cfg.Concurrency = DefaultUploadConcurrency - } - if u.cfg.PartSize == 0 { - u.cfg.PartSize = DefaultUploadPartSize - } - if u.cfg.MaxUploadParts == 0 { - u.cfg.MaxUploadParts = MaxUploadParts - } - - // Try to get the total size for some optimizations - if err := u.initSize(); err != nil { - return err - } - - // If PartSize was changed or partPool was never setup then we need to allocated a new pool - // so that we return []byte slices of the correct size - poolCap := u.cfg.Concurrency + 1 - if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize { - u.cfg.partPool = newByteSlicePool(u.cfg.PartSize) - u.cfg.partPool.ModifyCapacity(poolCap) - } else { - u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool} - u.cfg.partPool.ModifyCapacity(poolCap) - } - - return nil -} - -// initSize tries to detect the total stream size, setting u.totalSize. If -// the size is not known, totalSize is set to -1. -func (u *uploader) initSize() error { - u.totalSize = -1 - - switch r := u.in.Body.(type) { - case io.Seeker: - n, err := seekerLen(r) - if err != nil { - return err - } - u.totalSize = n - - // Try to adjust partSize if it is too small and account for - // integer division truncation. - if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) { - // Add one to the part size to account for remainders - // during the size calculation. e.g odd number of bytes. - u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 - } - } - - return nil -} - -// nextReader returns a seekable reader representing the next packet of data. -// This operation increases the shared u.readerPos counter, but note that it -// does not need to be wrapped in a mutex because nextReader is only called -// from the main thread. -func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) { - switch r := u.in.Body.(type) { - case readerAtSeeker: - var err error - - n := u.cfg.PartSize - if u.totalSize >= 0 { - bytesLeft := u.totalSize - u.readerPos - - if bytesLeft <= u.cfg.PartSize { - err = io.EOF - n = bytesLeft - } - } - - var ( - reader io.ReadSeeker - cleanup func() - ) - - reader = io.NewSectionReader(r, u.readerPos, n) - if u.cfg.BufferProvider != nil { - reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader) - } else { - cleanup = func() {} - } - - u.readerPos += n - - return reader, int(n), cleanup, err - - default: - part, err := u.cfg.partPool.Get(u.ctx) - if err != nil { - return nil, 0, func() {}, err - } - - n, err := readFillBuf(r, *part) - u.readerPos += int64(n) - - cleanup := func() { - u.cfg.partPool.Put(part) - } - - return bytes.NewReader((*part)[0:n]), n, cleanup, err - } -} - -func readFillBuf(r io.Reader, b []byte) (offset int, err error) { - for offset < len(b) && err == nil { - var n int - n, err = r.Read(b[offset:]) - offset += n - } - - return offset, err -} - -// singlePart contains upload logic for uploading a single chunk via -// a regular PutObject request. Multipart requests require at least two -// parts, or at least 5MB of data. -func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) { - defer cleanup() - - var params s3.PutObjectInput - awsutil.Copy(¶ms, u.in) - params.Body = r - - // Need to use request form because URL generated in request is - // used in return. - - var locationRecorder recordLocationClient - out, err := u.cfg.S3.PutObject(u.ctx, ¶ms, - append(u.cfg.ClientOptions, locationRecorder.WrapClient())...) - if err != nil { - return nil, err - } - - return &UploadOutput{ - Location: locationRecorder.location, - - BucketKeyEnabled: aws.ToBool(out.BucketKeyEnabled), - ChecksumCRC32: out.ChecksumCRC32, - ChecksumCRC32C: out.ChecksumCRC32C, - ChecksumSHA1: out.ChecksumSHA1, - ChecksumSHA256: out.ChecksumSHA256, - ETag: out.ETag, - Expiration: out.Expiration, - Key: params.Key, - RequestCharged: out.RequestCharged, - SSEKMSKeyId: out.SSEKMSKeyId, - ServerSideEncryption: out.ServerSideEncryption, - VersionID: out.VersionId, - }, nil -} - -type httpClient interface { - Do(r *http.Request) (*http.Response, error) -} - -type recordLocationClient struct { - httpClient - location string -} - -func (c *recordLocationClient) WrapClient() func(o *s3.Options) { - return func(o *s3.Options) { - c.httpClient = o.HTTPClient - o.HTTPClient = c - } -} - -func (c *recordLocationClient) Do(r *http.Request) (resp *http.Response, err error) { - resp, err = c.httpClient.Do(r) - if err != nil { - return resp, err - } - - if resp.Request != nil && resp.Request.URL != nil { - url := *resp.Request.URL - url.RawQuery = "" - c.location = url.String() - } - - return resp, err -} - -// internal structure to manage a specific multipart upload to S3. -type multiuploader struct { - *uploader - wg sync.WaitGroup - m sync.Mutex - err error - uploadID string - parts completedParts -} - -// keeps track of a single chunk of data being sent to S3. -type chunk struct { - buf io.ReadSeeker - num int32 - cleanup func() -} - -// completedParts is a wrapper to make parts sortable by their part number, -// since S3 required this list to be sent in sorted order. -type completedParts []types.CompletedPart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { - return aws.ToInt32(a[i].PartNumber) < aws.ToInt32(a[j].PartNumber) -} - -// upload will perform a multipart upload using the firstBuf buffer containing -// the first chunk of data. -func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { - var params s3.CreateMultipartUploadInput - awsutil.Copy(¶ms, u.in) - - // Create the multipart - var locationRecorder recordLocationClient - resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, ¶ms, - append(u.cfg.ClientOptions, locationRecorder.WrapClient())...) - if err != nil { - cleanup() - return nil, err - } - u.uploadID = *resp.UploadId - - // Create the workers - ch := make(chan chunk, u.cfg.Concurrency) - for i := 0; i < u.cfg.Concurrency; i++ { - u.wg.Add(1) - go u.readChunk(ch) - } - - // Send part 1 to the workers - var num int32 = 1 - ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup} - - // Read and queue the rest of the parts - for u.geterr() == nil && err == nil { - var ( - reader io.ReadSeeker - nextChunkLen int - ok bool - ) - - reader, nextChunkLen, cleanup, err = u.nextReader() - ok, err = u.shouldContinue(num, nextChunkLen, err) - if !ok { - cleanup() - if err != nil { - u.seterr(err) - } - break - } - - num++ - - ch <- chunk{buf: reader, num: num, cleanup: cleanup} - } - - // Close the channel, wait for workers, and complete upload - close(ch) - u.wg.Wait() - completeOut := u.complete() - - if err := u.geterr(); err != nil { - return nil, &multiUploadError{ - err: err, - uploadID: u.uploadID, - } - } - - return &UploadOutput{ - Location: locationRecorder.location, - UploadID: u.uploadID, - CompletedParts: u.parts, - - BucketKeyEnabled: aws.ToBool(completeOut.BucketKeyEnabled), - ChecksumCRC32: completeOut.ChecksumCRC32, - ChecksumCRC32C: completeOut.ChecksumCRC32C, - ChecksumSHA1: completeOut.ChecksumSHA1, - ChecksumSHA256: completeOut.ChecksumSHA256, - ETag: completeOut.ETag, - Expiration: completeOut.Expiration, - Key: completeOut.Key, - RequestCharged: completeOut.RequestCharged, - SSEKMSKeyId: completeOut.SSEKMSKeyId, - ServerSideEncryption: completeOut.ServerSideEncryption, - VersionID: completeOut.VersionId, - }, nil -} - -func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) (bool, error) { - if err != nil && err != io.EOF { - return false, fmt.Errorf("read multipart upload data failed, %w", err) - } - - if nextChunkLen == 0 { - // No need to upload empty part, if file was empty to start - // with empty single part would of been created and never - // started multipart upload. - return false, nil - } - - part++ - // This upload exceeded maximum number of supported parts, error now. - if part > u.cfg.MaxUploadParts || part > MaxUploadParts { - var msg string - if part > u.cfg.MaxUploadParts { - msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", - u.cfg.MaxUploadParts) - } else { - msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", - MaxUploadParts) - } - return false, fmt.Errorf(msg) - } - - return true, err -} - -// readChunk runs in worker goroutines to pull chunks off of the ch channel -// and send() them as UploadPart requests. -func (u *multiuploader) readChunk(ch chan chunk) { - defer u.wg.Done() - for { - data, ok := <-ch - - if !ok { - break - } - - if u.geterr() == nil { - if err := u.send(data); err != nil { - u.seterr(err) - } - } - - data.cleanup() - } -} - -// send performs an UploadPart request and keeps track of the completed -// part information. -func (u *multiuploader) send(c chunk) error { - params := &s3.UploadPartInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - Body: c.buf, - SSECustomerAlgorithm: u.in.SSECustomerAlgorithm, - SSECustomerKey: u.in.SSECustomerKey, - SSECustomerKeyMD5: u.in.SSECustomerKeyMD5, - ExpectedBucketOwner: u.in.ExpectedBucketOwner, - RequestPayer: u.in.RequestPayer, - - ChecksumAlgorithm: u.in.ChecksumAlgorithm, - // Invalid to set any of the individual ChecksumXXX members from - // PutObject as they are never valid for individual parts of a - // multipart upload. - - PartNumber: aws.Int32(c.num), - UploadId: &u.uploadID, - } - // TODO should do copy then clear? - - resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...) - if err != nil { - return err - } - - var completed types.CompletedPart - awsutil.Copy(&completed, resp) - completed.PartNumber = aws.Int32(c.num) - - u.m.Lock() - u.parts = append(u.parts, completed) - u.m.Unlock() - - return nil -} - -// geterr is a thread-safe getter for the error object -func (u *multiuploader) geterr() error { - u.m.Lock() - defer u.m.Unlock() - - return u.err -} - -// seterr is a thread-safe setter for the error object -func (u *multiuploader) seterr(e error) { - u.m.Lock() - defer u.m.Unlock() - - u.err = e -} - -// fail will abort the multipart unless LeavePartsOnError is set to true. -func (u *multiuploader) fail() { - if u.cfg.LeavePartsOnError { - return - } - - params := &s3.AbortMultipartUploadInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - UploadId: &u.uploadID, - } - _, err := u.cfg.S3.AbortMultipartUpload(u.ctx, params, u.cfg.ClientOptions...) - if err != nil { - // TODO: Add logging - //logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err)) - _ = err - } -} - -// complete successfully completes a multipart upload and returns the response. -func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { - if u.geterr() != nil { - u.fail() - return nil - } - - // Parts must be sorted in PartNumber order. - sort.Sort(u.parts) - - var params s3.CompleteMultipartUploadInput - awsutil.Copy(¶ms, u.in) - params.UploadId = &u.uploadID - params.MultipartUpload = &types.CompletedMultipartUpload{Parts: u.parts} - - resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, ¶ms, u.cfg.ClientOptions...) - if err != nil { - u.seterr(err) - u.fail() - } - - return resp -} - -type readerAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// setS3ExpressDefaultChecksum defaults to CRC32 for S3Express buckets, -// which is required when uploading to those through transfer manager. -type setS3ExpressDefaultChecksum struct{} - -func (*setS3ExpressDefaultChecksum) ID() string { - return "setS3ExpressDefaultChecksum" -} - -func (*setS3ExpressDefaultChecksum) HandleFinalize( - ctx context.Context, in smithymiddleware.FinalizeInput, next smithymiddleware.FinalizeHandler, -) ( - out smithymiddleware.FinalizeOutput, metadata smithymiddleware.Metadata, err error, -) { - const checksumHeader = "x-amz-checksum-algorithm" - - if internalcontext.GetS3Backend(ctx) != internalcontext.S3BackendS3Express { - return next.HandleFinalize(ctx, in) - } - - // If this is CreateMultipartUpload we need to ensure the checksum - // algorithm header is present. Otherwise everything is driven off the - // context setting and we can let it flow from there. - if middleware.GetOperationName(ctx) == "CreateMultipartUpload" { - r, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { - r.Header.Set(checksumHeader, "CRC32") - } - return next.HandleFinalize(ctx, in) - } else if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(types.ChecksumAlgorithmCrc32)) - } - - return next.HandleFinalize(ctx, in) -} - -func addFeatureUserAgent(stack *smithymiddleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(middleware.UserAgentFeatureS3Transfer) - return nil -} - -func getOrAddRequestUserAgent(stack *smithymiddleware.Stack) (*middleware.RequestUserAgent, error) { - id := (*middleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = middleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, smithymiddleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*middleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go deleted file mode 100644 index fb10ec309ac8..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go +++ /dev/null @@ -1,83 +0,0 @@ -package manager - -import ( - "bufio" - "io" - "sync" - - "github.com/aws/aws-sdk-go-v2/internal/sdkio" -) - -// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom -type WriterReadFrom interface { - io.Writer - io.ReaderFrom -} - -// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer -type WriterReadFromProvider interface { - GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) -} - -type bufferedWriter interface { - WriterReadFrom - Flush() error - Reset(io.Writer) -} - -type bufferedReadFrom struct { - bufferedWriter -} - -func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { - n, err := b.bufferedWriter.ReadFrom(r) - if flushErr := b.Flush(); flushErr != nil && err == nil { - err = flushErr - } - return n, err -} - -// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool -// to manage allocation and reuse of *bufio.Writer structures. -type PooledBufferedReadFromProvider struct { - pool sync.Pool -} - -// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider -// Size is used to control the size of the underlying *bufio.Writer created for -// calls to GetReadFrom. -func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider { - if size < int(32*sdkio.KibiByte) { - size = int(64 * sdkio.KibiByte) - } - - return &PooledBufferedReadFromProvider{ - pool: sync.Pool{ - New: func() interface{} { - return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)} - }, - }, - } -} - -// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom -// interface/ Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom -// has been completed in order to allow the reuse of the *bufio.Writer -func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) { - buffer := p.pool.Get().(*bufferedReadFrom) - buffer.Reset(writer) - r = buffer - cleanup = func() { - buffer.Reset(nil) // Reset to nil writer to release reference - p.pool.Put(buffer) - } - return r, cleanup -} - -type suppressWriterAt struct { - suppressed io.Reader -} - -func (s *suppressWriterAt) Read(p []byte) (n int, err error) { - return s.suppressed.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go deleted file mode 100644 index 0b81db5480c6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go +++ /dev/null @@ -1,45 +0,0 @@ -package auth - -import ( - "github.com/aws/smithy-go/auth" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme -// for pre-existing implementations where the signer was added to client -// config. SDK clients will key off of this type and ensure per-operation -// updates to those signers persist on the scheme itself. -type HTTPAuthScheme struct { - schemeID string - signer smithyhttp.Signer -} - -var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil) - -// NewHTTPAuthScheme returns an auth scheme instance with the given config. -func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme { - return &HTTPAuthScheme{ - schemeID: schemeID, - signer: signer, - } -} - -// SchemeID identifies the auth scheme. -func (s *HTTPAuthScheme) SchemeID() string { - return s.schemeID -} - -// IdentityResolver gets the identity resolver for the auth scheme. -func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { - return o.GetIdentityResolver(s.schemeID) -} - -// Signer gets the signer for the auth scheme. -func (s *HTTPAuthScheme) Signer() smithyhttp.Signer { - return s.signer -} - -// WithSigner returns a new instance of the auth scheme with the updated signer. -func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme { - return NewHTTPAuthScheme(s.schemeID, signer) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go deleted file mode 100644 index bbc2ec06ecce..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go +++ /dev/null @@ -1,191 +0,0 @@ -package auth - -import ( - "context" - "fmt" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -// SigV4 is a constant representing -// Authentication Scheme Signature Version 4 -const SigV4 = "sigv4" - -// SigV4A is a constant representing -// Authentication Scheme Signature Version 4A -const SigV4A = "sigv4a" - -// SigV4S3Express identifies the S3 S3Express auth scheme. -const SigV4S3Express = "sigv4-s3express" - -// None is a constant representing the -// None Authentication Scheme -const None = "none" - -// SupportedSchemes is a data structure -// that indicates the list of supported AWS -// authentication schemes -var SupportedSchemes = map[string]bool{ - SigV4: true, - SigV4A: true, - SigV4S3Express: true, - None: true, -} - -// AuthenticationScheme is a representation of -// AWS authentication schemes -type AuthenticationScheme interface { - isAuthenticationScheme() -} - -// AuthenticationSchemeV4 is a AWS SigV4 representation -type AuthenticationSchemeV4 struct { - Name string - SigningName *string - SigningRegion *string - DisableDoubleEncoding *bool -} - -func (a *AuthenticationSchemeV4) isAuthenticationScheme() {} - -// AuthenticationSchemeV4A is a AWS SigV4A representation -type AuthenticationSchemeV4A struct { - Name string - SigningName *string - SigningRegionSet []string - DisableDoubleEncoding *bool -} - -func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {} - -// AuthenticationSchemeNone is a representation for the none auth scheme -type AuthenticationSchemeNone struct{} - -func (a *AuthenticationSchemeNone) isAuthenticationScheme() {} - -// NoAuthenticationSchemesFoundError is used in signaling -// that no authentication schemes have been specified. -type NoAuthenticationSchemesFoundError struct{} - -func (e *NoAuthenticationSchemesFoundError) Error() string { - return fmt.Sprint("No authentication schemes specified.") -} - -// UnSupportedAuthenticationSchemeSpecifiedError is used in -// signaling that only unsupported authentication schemes -// were specified. -type UnSupportedAuthenticationSchemeSpecifiedError struct { - UnsupportedSchemes []string -} - -func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string { - return fmt.Sprint("Unsupported authentication scheme specified.") -} - -// GetAuthenticationSchemes extracts the relevant authentication scheme data -// into a custom strongly typed Go data structure. -func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) { - var result []AuthenticationScheme - if !p.Has("authSchemes") { - return nil, &NoAuthenticationSchemesFoundError{} - } - - authSchemes, _ := p.Get("authSchemes").([]interface{}) - - var unsupportedSchemes []string - for _, scheme := range authSchemes { - authScheme, _ := scheme.(map[string]interface{}) - - version := authScheme["name"].(string) - switch version { - case SigV4, SigV4S3Express: - v4Scheme := AuthenticationSchemeV4{ - Name: version, - SigningName: getSigningName(authScheme), - SigningRegion: getSigningRegion(authScheme), - DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), - } - result = append(result, AuthenticationScheme(&v4Scheme)) - case SigV4A: - v4aScheme := AuthenticationSchemeV4A{ - Name: SigV4A, - SigningName: getSigningName(authScheme), - SigningRegionSet: getSigningRegionSet(authScheme), - DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), - } - result = append(result, AuthenticationScheme(&v4aScheme)) - case None: - noneScheme := AuthenticationSchemeNone{} - result = append(result, AuthenticationScheme(&noneScheme)) - default: - unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string)) - continue - } - } - - if len(result) == 0 { - return nil, &UnSupportedAuthenticationSchemeSpecifiedError{ - UnsupportedSchemes: unsupportedSchemes, - } - } - - return result, nil -} - -type disableDoubleEncoding struct{} - -// SetDisableDoubleEncoding sets or modifies the disable double encoding option -// on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value) -} - -// GetDisableDoubleEncoding retrieves the disable double encoding option -// from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) { - value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool) - return value, ok -} - -func getSigningName(authScheme map[string]interface{}) *string { - signingName, ok := authScheme["signingName"].(string) - if !ok || signingName == "" { - return nil - } - return &signingName -} - -func getSigningRegionSet(authScheme map[string]interface{}) []string { - untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{}) - if !ok { - return nil - } - signingRegionSet := []string{} - for _, item := range untypedSigningRegionSet { - signingRegionSet = append(signingRegionSet, item.(string)) - } - return signingRegionSet -} - -func getSigningRegion(authScheme map[string]interface{}) *string { - signingRegion, ok := authScheme["signingRegion"].(string) - if !ok || signingRegion == "" { - return nil - } - return &signingRegion -} - -func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool { - disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool) - if !ok { - return nil - } - return &disableDoubleEncoding -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go deleted file mode 100644 index f059b5d391f4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go +++ /dev/null @@ -1,43 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - "time" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/auth/bearer" -) - -// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity. -type BearerTokenAdapter struct { - Token bearer.Token -} - -var _ auth.Identity = (*BearerTokenAdapter)(nil) - -// Expiration returns the time of expiration for the token. -func (v *BearerTokenAdapter) Expiration() time.Time { - return v.Token.Expires -} - -// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy -// auth.IdentityResolver. -type BearerTokenProviderAdapter struct { - Provider bearer.TokenProvider -} - -var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil) - -// GetIdentity retrieves a bearer token using the underlying provider. -func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - token, err := v.Provider.RetrieveBearerToken(ctx) - if err != nil { - return nil, fmt.Errorf("get token: %w", err) - } - - return &BearerTokenAdapter{Token: token}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go deleted file mode 100644 index a88281527c02..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go +++ /dev/null @@ -1,35 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/auth/bearer" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http -// auth.Signer. -type BearerTokenSignerAdapter struct { - Signer bearer.Signer -} - -var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil) - -// SignRequest signs the request with the provided bearer token. -func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error { - ca, ok := identity.(*BearerTokenAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r) - if err != nil { - return fmt.Errorf("sign request: %w", err) - } - - *r = *signed.(*smithyhttp.Request) - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go deleted file mode 100644 index f926c4aaa76c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go +++ /dev/null @@ -1,46 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// CredentialsAdapter adapts aws.Credentials to auth.Identity. -type CredentialsAdapter struct { - Credentials aws.Credentials -} - -var _ auth.Identity = (*CredentialsAdapter)(nil) - -// Expiration returns the time of expiration for the credentials. -func (v *CredentialsAdapter) Expiration() time.Time { - return v.Credentials.Expires -} - -// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver. -type CredentialsProviderAdapter struct { - Provider aws.CredentialsProvider -} - -var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) - -// GetIdentity retrieves AWS credentials using the underlying provider. -func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - if v.Provider == nil { - return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil - } - - creds, err := v.Provider.Retrieve(ctx) - if err != nil { - return nil, fmt.Errorf("get credentials: %w", err) - } - - return &CredentialsAdapter{Credentials: creds}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go deleted file mode 100644 index 42b458673390..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions. -package smithy diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go deleted file mode 100644 index 24db8e144cba..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go +++ /dev/null @@ -1,57 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer. -type V4SignerAdapter struct { - Signer v4.HTTPSigner - Logger logging.Logger - LogSigning bool -} - -var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil) - -// SignRequest signs the request with the provided identity. -func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { - ca, ok := identity.(*CredentialsAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - name, ok := smithyhttp.GetSigV4SigningName(&props) - if !ok { - return fmt.Errorf("sigv4 signing name is required") - } - - region, ok := smithyhttp.GetSigV4SigningRegion(&props) - if !ok { - return fmt.Errorf("sigv4 signing region is required") - } - - hash := v4.GetPayloadHash(ctx) - signingTime := sdk.NowTime() - skew := internalcontext.GetAttemptSkewContext(ctx) - signingTime = signingTime.Add(skew) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) { - o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) - - o.Logger = v.Logger - o.LogSigning = v.LogSigning - }) - if err != nil { - return fmt.Errorf("sign http: %w", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go deleted file mode 100644 index 938cd14c1e4c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go +++ /dev/null @@ -1,112 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. -func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - if dst.Kind() == reflect.String { - dst.SetString(e.String()) - } else { - dst.Set(reflect.New(e)) - } - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if dst.Kind() != reflect.String && src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go deleted file mode 100644 index bcfe51a2b7ee..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go +++ /dev/null @@ -1,33 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - // Special casing for strings as typed enumerations are string aliases - // but are not deep equal. - if ra.Kind() == reflect.String && rb.Kind() == reflect.String { - return ra.String() == rb.String() - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go deleted file mode 100644 index 1adecae6b941..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go +++ /dev/null @@ -1,131 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. -func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - isPtr := false - for v.Kind() == reflect.Ptr { - isPtr = true - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - if isPtr { - buf.WriteRune('&') - } - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - if isPtr { - buf.WriteRune('&') - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - if isPtr { - buf.WriteRune('&') - } - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - - for v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - - if v.Kind() == reflect.Ptr || v.Kind() == reflect.Struct || v.Kind() == reflect.Map || v.Kind() == reflect.Slice { - prettify(v, indent, buf) - return - } - - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go deleted file mode 100644 index 645df2450fc5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go +++ /dev/null @@ -1,88 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. -func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue // ignore unexported fields - } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { - continue // ignore unset fields - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("") - } else { - stringValue(fv, indent+2, buf) - } - - buf.WriteString(",\n") - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md deleted file mode 100644 index 48b7efd9d6c4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ /dev/null @@ -1,434 +0,0 @@ -# v1.4.4 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.37 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.36 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.35 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.34 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.33 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.32 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.31 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.30 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.29 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.3.28 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.27 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.26 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.25 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.24 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.23 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.22 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.21 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2024-03-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.43 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.42 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.41 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.40 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.39 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.38 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.37 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.36 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.35 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.34 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.33 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.32 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.31 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.30 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.29 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.28 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.27 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.26 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.25 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.24 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.23 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.22 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.21 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.20 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.19 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.18 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.17 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.16 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.15 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.14 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.13 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.12 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.11 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.10 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.9 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.8 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.7 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.6 (2022-03-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.5 (2022-02-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.4 (2022-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.3 (2022-01-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.7 (2021-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.6 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.5 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.4 (2021-08-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.2 (2021-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.1 (2021-07-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2021-06-25) - -* **Release**: Release new modules -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go deleted file mode 100644 index cd4d19b89821..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go +++ /dev/null @@ -1,65 +0,0 @@ -package configsources - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" -) - -// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value -// for Enable Endpoint Discovery -type EnableEndpointDiscoveryProvider interface { - GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) -} - -// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. -// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, -// and error if one is encountered. -func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok { - value, found, err = p.GetEnableEndpointDiscovery(ctx) - if err != nil || found { - break - } - } - } - return -} - -// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint -type UseDualStackEndpointProvider interface { - GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error) -} - -// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpoint from the config slice. -// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseDualStackEndpointProvider); ok { - value, found, err = p.GetUseDualStackEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} - -// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint -type UseFIPSEndpointProvider interface { - GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error) -} - -// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice. -// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseFIPSEndpointProvider); ok { - value, found, err = p.GetUseFIPSEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go deleted file mode 100644 index e7835f852415..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go +++ /dev/null @@ -1,57 +0,0 @@ -package configsources - -import ( - "context" -) - -// ServiceBaseEndpointProvider is needed to search for all providers -// that provide a configured service endpoint -type ServiceBaseEndpointProvider interface { - GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) -} - -// IgnoreConfiguredEndpointsProvider is needed to search for all providers -// that provide a flag to disable configured endpoints. -// -// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because -// service packages cannot import github.com/aws/aws-sdk-go-v2/config -// due to result import cycle error. -type IgnoreConfiguredEndpointsProvider interface { - GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -// -// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because -// service packages cannot import github.com/aws/aws-sdk-go-v2/config -// due to result import cycle error. -func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { - value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources -// while allowing for configured endpoints to be disabled -func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) { - if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val { - return "", false, nil - } - - for _, cs := range configs { - if p, ok := cs.(ServiceBaseEndpointProvider); ok { - value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go deleted file mode 100644 index e304ef67cf6a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package configsources - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.4.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go deleted file mode 100644 index f0c283d3942b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go +++ /dev/null @@ -1,52 +0,0 @@ -package context - -import ( - "context" - "time" - - "github.com/aws/smithy-go/middleware" -) - -type s3BackendKey struct{} -type checksumInputAlgorithmKey struct{} -type clockSkew struct{} - -const ( - // S3BackendS3Express identifies the S3Express backend - S3BackendS3Express = "S3Express" -) - -// SetS3Backend stores the resolved endpoint backend within the request -// context, which is required for a variety of custom S3 behaviors. -func SetS3Backend(ctx context.Context, typ string) context.Context { - return middleware.WithStackValue(ctx, s3BackendKey{}, typ) -} - -// GetS3Backend retrieves the stored endpoint backend within the context. -func GetS3Backend(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string) - return v -} - -// SetChecksumInputAlgorithm sets the request checksum algorithm on the -// context. -func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value) -} - -// GetChecksumInputAlgorithm returns the checksum algorithm from the context. -func GetChecksumInputAlgorithm(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) - return v -} - -// SetAttemptSkewContext sets the clock skew value on the context -func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context { - return middleware.WithStackValue(ctx, clockSkew{}, v) -} - -// GetAttemptSkewContext gets the clock skew value from the context -func GetAttemptSkewContext(ctx context.Context) time.Duration { - x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration) - return x -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go deleted file mode 100644 index e6223dd3b3e6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go +++ /dev/null @@ -1,94 +0,0 @@ -package awsrulesfn - -import ( - "strings" -) - -// ARN provides AWS ARN components broken out into a data structure. -type ARN struct { - Partition string - Service string - Region string - AccountId string - ResourceId OptionalStringSlice -} - -const ( - arnDelimiters = ":" - resourceDelimiters = "/:" - arnSections = 6 - arnPrefix = "arn:" - - // zero-indexed - sectionPartition = 1 - sectionService = 2 - sectionRegion = 3 - sectionAccountID = 4 - sectionResource = 5 -) - -// ParseARN returns an [ARN] value parsed from the input string provided. If -// the ARN cannot be parsed nil will be returned, and error added to -// [ErrorCollector]. -func ParseARN(input string) *ARN { - if !strings.HasPrefix(input, arnPrefix) { - return nil - } - - sections := strings.SplitN(input, arnDelimiters, arnSections) - if numSections := len(sections); numSections != arnSections { - return nil - } - - if sections[sectionPartition] == "" { - return nil - } - if sections[sectionService] == "" { - return nil - } - if sections[sectionResource] == "" { - return nil - } - - return &ARN{ - Partition: sections[sectionPartition], - Service: sections[sectionService], - Region: sections[sectionRegion], - AccountId: sections[sectionAccountID], - ResourceId: splitResource(sections[sectionResource]), - } -} - -// splitResource splits the resource components by the ARN resource delimiters. -func splitResource(v string) []string { - var parts []string - var offset int - - for offset <= len(v) { - idx := strings.IndexAny(v[offset:], "/:") - if idx < 0 { - parts = append(parts, v[offset:]) - break - } - parts = append(parts, v[offset:idx+offset]) - offset += idx + 1 - } - - return parts -} - -// OptionalStringSlice provides a helper to safely get the index of a string -// slice that may be out of bounds. Returns pointer to string if index is -// valid. Otherwise returns nil. -type OptionalStringSlice []string - -// Get returns a string pointer of the string at index i if the index is valid. -// Otherwise returns nil. -func (s OptionalStringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go deleted file mode 100644 index d5a365853f82..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package awsrulesfn provides AWS focused endpoint rule functions for -// evaluating endpoint resolution rules. -package awsrulesfn diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go deleted file mode 100644 index df72da97ce30..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build codegen -// +build codegen - -package awsrulesfn - -//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go deleted file mode 100644 index 637e5fc18e42..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go +++ /dev/null @@ -1,51 +0,0 @@ -package awsrulesfn - -import ( - "net" - "strings" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// IsVirtualHostableS3Bucket returns if the input is a DNS compatible bucket -// name and can be used with Amazon S3 virtual hosted style addressing. Similar -// to [rulesfn.IsValidHostLabel] with the added restriction that the length of label -// must be [3:63] characters long, all lowercase, and not formatted as an IP -// address. -func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool { - // input should not be formatted as an IP address - // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but - // validation further down will catch that anyway (it's guaranteed to have - // unfriendly characters % and : if that's the case) - if net.ParseIP(input) != nil { - return false - } - - var labels []string - if allowSubDomains { - labels = strings.Split(input, ".") - } else { - labels = []string{input} - } - - for _, label := range labels { - // validate special length constraints - if l := len(label); l < 3 || l > 63 { - return false - } - - // Validate no capital letters - for _, r := range label { - if r >= 'A' && r <= 'Z' { - return false - } - } - - // Validate valid host label - if !smithyhttp.ValidHostLabel(label) { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go deleted file mode 100644 index 91414afe81c5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go +++ /dev/null @@ -1,76 +0,0 @@ -package awsrulesfn - -import "regexp" - -// Partition provides the metadata describing an AWS partition. -type Partition struct { - ID string `json:"id"` - Regions map[string]RegionOverrides `json:"regions"` - RegionRegex string `json:"regionRegex"` - DefaultConfig PartitionConfig `json:"outputs"` -} - -// PartitionConfig provides the endpoint metadata for an AWS region or partition. -type PartitionConfig struct { - Name string `json:"name"` - DnsSuffix string `json:"dnsSuffix"` - DualStackDnsSuffix string `json:"dualStackDnsSuffix"` - SupportsFIPS bool `json:"supportsFIPS"` - SupportsDualStack bool `json:"supportsDualStack"` - ImplicitGlobalRegion string `json:"implicitGlobalRegion"` -} - -type RegionOverrides struct { - Name *string `json:"name"` - DnsSuffix *string `json:"dnsSuffix"` - DualStackDnsSuffix *string `json:"dualStackDnsSuffix"` - SupportsFIPS *bool `json:"supportsFIPS"` - SupportsDualStack *bool `json:"supportsDualStack"` -} - -const defaultPartition = "aws" - -func getPartition(partitions []Partition, region string) *PartitionConfig { - for _, partition := range partitions { - if v, ok := partition.Regions[region]; ok { - p := mergeOverrides(partition.DefaultConfig, v) - return &p - } - } - - for _, partition := range partitions { - regionRegex := regexp.MustCompile(partition.RegionRegex) - if regionRegex.MatchString(region) { - v := partition.DefaultConfig - return &v - } - } - - for _, partition := range partitions { - if partition.ID == defaultPartition { - v := partition.DefaultConfig - return &v - } - } - - return nil -} - -func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig { - if from.Name != nil { - into.Name = *from.Name - } - if from.DnsSuffix != nil { - into.DnsSuffix = *from.DnsSuffix - } - if from.DualStackDnsSuffix != nil { - into.DualStackDnsSuffix = *from.DualStackDnsSuffix - } - if from.SupportsFIPS != nil { - into.SupportsFIPS = *from.SupportsFIPS - } - if from.SupportsDualStack != nil { - into.SupportsDualStack = *from.SupportsDualStack - } - return into -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go deleted file mode 100644 index 83e5ac62bf76..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT. - -package awsrulesfn - -// GetPartition returns an AWS [Partition] for the region provided. If the -// partition cannot be determined nil will be returned. -func GetPartition(region string) *PartitionConfig { - return getPartition(partitions, region) -} - -var partitions = []Partition{ - { - ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, - ImplicitGlobalRegion: "us-east-1", - }, - Regions: map[string]RegionOverrides{ - "af-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-east-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-south-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-4": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-5": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-7": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "aws-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ca-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ca-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-central-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-north-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-south-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "il-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "me-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "me-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "mx-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "sa-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-east-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-west-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-cn", - RegionRegex: "^cn\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-cn", - DnsSuffix: "amazonaws.com.cn", - DualStackDnsSuffix: "api.amazonwebservices.com.cn", - SupportsFIPS: true, - SupportsDualStack: true, - ImplicitGlobalRegion: "cn-northwest-1", - }, - Regions: map[string]RegionOverrides{ - "aws-cn-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "cn-north-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "cn-northwest-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-eusc", - RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-eusc", - DnsSuffix: "amazonaws.eu", - DualStackDnsSuffix: "api.amazonwebservices.eu", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "eusc-de-east-1", - }, - Regions: map[string]RegionOverrides{ - "eusc-de-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso", - RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso", - DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "api.aws.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "us-iso-east-1", - }, - Regions: map[string]RegionOverrides{ - "aws-iso-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-iso-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-iso-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-b", - RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-b", - DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "api.aws.scloud", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "us-isob-east-1", - }, - Regions: map[string]RegionOverrides{ - "aws-iso-b-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-isob-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-e", - RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-e", - DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "api.cloud-aws.adc-e.uk", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "eu-isoe-west-1", - }, - Regions: map[string]RegionOverrides{ - "aws-iso-e-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-isoe-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-f", - RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-f", - DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "api.aws.hci.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "us-isof-south-1", - }, - Regions: map[string]RegionOverrides{ - "aws-iso-f-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-isof-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-isof-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, - ImplicitGlobalRegion: "us-gov-west-1", - }, - Regions: map[string]RegionOverrides{ - "aws-us-gov-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json deleted file mode 100644 index 299cb220419f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ /dev/null @@ -1,261 +0,0 @@ -{ - "partitions" : [ { - "id" : "aws", - "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-east-1", - "name" : "aws", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", - "regions" : { - "af-south-1" : { - "description" : "Africa (Cape Town)" - }, - "ap-east-1" : { - "description" : "Asia Pacific (Hong Kong)" - }, - "ap-east-2" : { - "description" : "Asia Pacific (Taipei)" - }, - "ap-northeast-1" : { - "description" : "Asia Pacific (Tokyo)" - }, - "ap-northeast-2" : { - "description" : "Asia Pacific (Seoul)" - }, - "ap-northeast-3" : { - "description" : "Asia Pacific (Osaka)" - }, - "ap-south-1" : { - "description" : "Asia Pacific (Mumbai)" - }, - "ap-south-2" : { - "description" : "Asia Pacific (Hyderabad)" - }, - "ap-southeast-1" : { - "description" : "Asia Pacific (Singapore)" - }, - "ap-southeast-2" : { - "description" : "Asia Pacific (Sydney)" - }, - "ap-southeast-3" : { - "description" : "Asia Pacific (Jakarta)" - }, - "ap-southeast-4" : { - "description" : "Asia Pacific (Melbourne)" - }, - "ap-southeast-5" : { - "description" : "Asia Pacific (Malaysia)" - }, - "ap-southeast-7" : { - "description" : "Asia Pacific (Thailand)" - }, - "aws-global" : { - "description" : "aws global region" - }, - "ca-central-1" : { - "description" : "Canada (Central)" - }, - "ca-west-1" : { - "description" : "Canada West (Calgary)" - }, - "eu-central-1" : { - "description" : "Europe (Frankfurt)" - }, - "eu-central-2" : { - "description" : "Europe (Zurich)" - }, - "eu-north-1" : { - "description" : "Europe (Stockholm)" - }, - "eu-south-1" : { - "description" : "Europe (Milan)" - }, - "eu-south-2" : { - "description" : "Europe (Spain)" - }, - "eu-west-1" : { - "description" : "Europe (Ireland)" - }, - "eu-west-2" : { - "description" : "Europe (London)" - }, - "eu-west-3" : { - "description" : "Europe (Paris)" - }, - "il-central-1" : { - "description" : "Israel (Tel Aviv)" - }, - "me-central-1" : { - "description" : "Middle East (UAE)" - }, - "me-south-1" : { - "description" : "Middle East (Bahrain)" - }, - "mx-central-1" : { - "description" : "Mexico (Central)" - }, - "sa-east-1" : { - "description" : "South America (Sao Paulo)" - }, - "us-east-1" : { - "description" : "US East (N. Virginia)" - }, - "us-east-2" : { - "description" : "US East (Ohio)" - }, - "us-west-1" : { - "description" : "US West (N. California)" - }, - "us-west-2" : { - "description" : "US West (Oregon)" - } - } - }, { - "id" : "aws-cn", - "outputs" : { - "dnsSuffix" : "amazonaws.com.cn", - "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", - "implicitGlobalRegion" : "cn-northwest-1", - "name" : "aws-cn", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^cn\\-\\w+\\-\\d+$", - "regions" : { - "aws-cn-global" : { - "description" : "aws-cn global region" - }, - "cn-north-1" : { - "description" : "China (Beijing)" - }, - "cn-northwest-1" : { - "description" : "China (Ningxia)" - } - } - }, { - "id" : "aws-eusc", - "outputs" : { - "dnsSuffix" : "amazonaws.eu", - "dualStackDnsSuffix" : "api.amazonwebservices.eu", - "implicitGlobalRegion" : "eusc-de-east-1", - "name" : "aws-eusc", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", - "regions" : { - "eusc-de-east-1" : { - "description" : "EU (Germany)" - } - } - }, { - "id" : "aws-iso", - "outputs" : { - "dnsSuffix" : "c2s.ic.gov", - "dualStackDnsSuffix" : "api.aws.ic.gov", - "implicitGlobalRegion" : "us-iso-east-1", - "name" : "aws-iso", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-global" : { - "description" : "aws-iso global region" - }, - "us-iso-east-1" : { - "description" : "US ISO East" - }, - "us-iso-west-1" : { - "description" : "US ISO WEST" - } - } - }, { - "id" : "aws-iso-b", - "outputs" : { - "dnsSuffix" : "sc2s.sgov.gov", - "dualStackDnsSuffix" : "api.aws.scloud", - "implicitGlobalRegion" : "us-isob-east-1", - "name" : "aws-iso-b", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-b-global" : { - "description" : "aws-iso-b global region" - }, - "us-isob-east-1" : { - "description" : "US ISOB East (Ohio)" - } - } - }, { - "id" : "aws-iso-e", - "outputs" : { - "dnsSuffix" : "cloud.adc-e.uk", - "dualStackDnsSuffix" : "api.cloud-aws.adc-e.uk", - "implicitGlobalRegion" : "eu-isoe-west-1", - "name" : "aws-iso-e", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-e-global" : { - "description" : "aws-iso-e global region" - }, - "eu-isoe-west-1" : { - "description" : "EU ISOE West" - } - } - }, { - "id" : "aws-iso-f", - "outputs" : { - "dnsSuffix" : "csp.hci.ic.gov", - "dualStackDnsSuffix" : "api.aws.hci.ic.gov", - "implicitGlobalRegion" : "us-isof-south-1", - "name" : "aws-iso-f", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-f-global" : { - "description" : "aws-iso-f global region" - }, - "us-isof-east-1" : { - "description" : "US ISOF EAST" - }, - "us-isof-south-1" : { - "description" : "US ISOF SOUTH" - } - } - }, { - "id" : "aws-us-gov", - "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-gov-west-1", - "name" : "aws-us-gov", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", - "regions" : { - "aws-us-gov-global" : { - "description" : "aws-us-gov global region" - }, - "us-gov-east-1" : { - "description" : "AWS GovCloud (US-East)" - }, - "us-gov-west-1" : { - "description" : "AWS GovCloud (US-West)" - } - } - } ], - "version" : "1.1" -} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go deleted file mode 100644 index 67950ca3661d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go +++ /dev/null @@ -1,201 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4"} -) - -// Options provide configuration needed to direct how endpoints are resolved. -type Options struct { - // Disable usage of HTTPS (TLS / SSL) - DisableHTTPS bool -} - -// Partitions is a slice of partition -type Partitions []Partition - -// ResolveEndpoint resolves a service endpoint for the given region and options. -func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { - if len(ps) == 0 { - return aws.Endpoint{}, fmt.Errorf("no partitions found") - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(region) { - continue - } - - return ps[i].ResolveEndpoint(region, opts) - } - - // fallback to first partition format to use when resolving the endpoint. - return ps[0].ResolveEndpoint(region, opts) -} - -// Partition is an AWS partition description for a service and its' region endpoints. -type Partition struct { - ID string - RegionRegex *regexp.Regexp - PartitionEndpoint string - IsRegionalized bool - Defaults Endpoint - Endpoints Endpoints -} - -func (p Partition) canResolveEndpoint(region string) bool { - _, ok := p.Endpoints[region] - return ok || p.RegionRegex.MatchString(region) -} - -// ResolveEndpoint resolves and service endpoint for the given region and options. -func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { - if len(region) == 0 && len(p.PartitionEndpoint) != 0 { - region = p.PartitionEndpoint - } - - e, _ := p.endpointForRegion(region) - - return e.resolve(p.ID, region, p.Defaults, options), nil -} - -func (p Partition) endpointForRegion(region string) (Endpoint, bool) { - if e, ok := p.Endpoints[region]; ok { - return e, true - } - - if !p.IsRegionalized { - return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return Endpoint{}, false -} - -// Endpoints is a map of service config regions to endpoints -type Endpoints map[string]Endpoint - -// CredentialScope is the credential scope of a region and service -type CredentialScope struct { - Region string - Service string -} - -// Endpoint is a service endpoint description -type Endpoint struct { - // True if the endpoint cannot be resolved for this partition/region/service - Unresolveable aws.Ternary - - Hostname string - Protocols []string - - CredentialScope CredentialScope - - SignatureVersions []string `json:"signatureVersions"` -} - -func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint { - var merged Endpoint - merged.mergeIn(def) - merged.mergeIn(e) - e = merged - - var u string - if e.Unresolveable != aws.TrueTernary { - // Only attempt to resolve the endpoint if it can be resolved. - hostname := strings.Replace(e.Hostname, "{region}", region, 1) - - scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) - u = scheme + "://" + hostname - } - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - signingName := e.CredentialScope.Service - - return aws.Endpoint{ - URL: u, - PartitionID: partition, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func (e *Endpoint) mergeIn(other Endpoint) { - if other.Unresolveable != aws.UnknownTernary { - e.Unresolveable = other.Unresolveable - } - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } -} - -func getEndpointScheme(protocols []string, disableHTTPS bool) string { - if disableHTTPS { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -// MapFIPSRegion extracts the intrinsic AWS region from one that may have an -// embedded FIPS microformat. -func MapFIPSRegion(region string) string { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(region, fipsInfix) || - strings.Contains(region, fipsPrefix) || - strings.Contains(region, fipsSuffix) { - region = strings.ReplaceAll(region, fipsInfix, "-") - region = strings.ReplaceAll(region, fipsPrefix, "") - region = strings.ReplaceAll(region, fipsSuffix, "") - } - - return region -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md deleted file mode 100644 index ec7d54beffda..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ /dev/null @@ -1,409 +0,0 @@ -# v2.7.4 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.7.3 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.7.2 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.7.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.7.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.37 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.36 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.35 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.34 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.33 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.32 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.31 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.30 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.29 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v2.6.28 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.27 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.26 (2024-12-19) - -* **Bug Fix**: Fix improper use of printf-style functions. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.25 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.24 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.23 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.22 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.21 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.20 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.19 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.37 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.36 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.29 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.28 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.27 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.26 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.25 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.3.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.2.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.1.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.0 (2021-11-06) - -* **Release**: Endpoint Variant Model Support -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go deleted file mode 100644 index 32251a7e3cc1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go +++ /dev/null @@ -1,302 +0,0 @@ -package endpoints - -import ( - "fmt" - "github.com/aws/smithy-go/logging" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// DefaultKey is a compound map key of a variant and other values. -type DefaultKey struct { - Variant EndpointVariant - ServiceVariant ServiceVariant -} - -// EndpointKey is a compound map key of a region and associated variant value. -type EndpointKey struct { - Region string - Variant EndpointVariant - ServiceVariant ServiceVariant -} - -// EndpointVariant is a bit field to describe the endpoints attributes. -type EndpointVariant uint64 - -const ( - // FIPSVariant indicates that the endpoint is FIPS capable. - FIPSVariant EndpointVariant = 1 << (64 - 1 - iota) - - // DualStackVariant indicates that the endpoint is DualStack capable. - DualStackVariant -) - -// ServiceVariant is a bit field to describe the service endpoint attributes. -type ServiceVariant uint64 - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "s3v4"} -) - -// Options provide configuration needed to direct how endpoints are resolved. -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger. - LogDeprecated bool - - // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority - // over the region name passed to the ResolveEndpoint call. - ResolvedRegion string - - // Disable usage of HTTPS (TLS / SSL) - DisableHTTPS bool - - // Instruct the resolver to use a service endpoint that supports dual-stack. - // If a service does not have a dual-stack endpoint an error will be returned by the resolver. - UseDualStackEndpoint aws.DualStackEndpointState - - // Instruct the resolver to use a service endpoint that supports FIPS. - // If a service does not have a FIPS endpoint an error will be returned by the resolver. - UseFIPSEndpoint aws.FIPSEndpointState - - // ServiceVariant is a bitfield of service specified endpoint variant data. - ServiceVariant ServiceVariant -} - -// GetEndpointVariant returns the EndpointVariant for the variant associated options. -func (o Options) GetEndpointVariant() (v EndpointVariant) { - if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - v |= DualStackVariant - } - if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - v |= FIPSVariant - } - return v -} - -// Partitions is a slice of partition -type Partitions []Partition - -// ResolveEndpoint resolves a service endpoint for the given region and options. -func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { - if len(ps) == 0 { - return aws.Endpoint{}, fmt.Errorf("no partitions found") - } - - if opts.Logger == nil { - opts.Logger = logging.Nop{} - } - - if len(opts.ResolvedRegion) > 0 { - region = opts.ResolvedRegion - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(region, opts) { - continue - } - - return ps[i].ResolveEndpoint(region, opts) - } - - // fallback to first partition format to use when resolving the endpoint. - return ps[0].ResolveEndpoint(region, opts) -} - -// Partition is an AWS partition description for a service and its' region endpoints. -type Partition struct { - ID string - RegionRegex *regexp.Regexp - PartitionEndpoint string - IsRegionalized bool - Defaults map[DefaultKey]Endpoint - Endpoints Endpoints -} - -func (p Partition) canResolveEndpoint(region string, opts Options) bool { - _, ok := p.Endpoints[EndpointKey{ - Region: region, - Variant: opts.GetEndpointVariant(), - }] - return ok || p.RegionRegex.MatchString(region) -} - -// ResolveEndpoint resolves and service endpoint for the given region and options. -func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { - if len(region) == 0 && len(p.PartitionEndpoint) != 0 { - region = p.PartitionEndpoint - } - - endpoints := p.Endpoints - - variant := options.GetEndpointVariant() - serviceVariant := options.ServiceVariant - - defaults := p.Defaults[DefaultKey{ - Variant: variant, - ServiceVariant: serviceVariant, - }] - - return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) -} - -func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { - key := EndpointKey{ - Region: region, - Variant: variant, - } - - if e, ok := endpoints[key]; ok { - return e - } - - if !p.IsRegionalized { - return endpoints[EndpointKey{ - Region: p.PartitionEndpoint, - Variant: variant, - ServiceVariant: serviceVariant, - }] - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return Endpoint{} -} - -// Endpoints is a map of service config regions to endpoints -type Endpoints map[EndpointKey]Endpoint - -// CredentialScope is the credential scope of a region and service -type CredentialScope struct { - Region string - Service string -} - -// Endpoint is a service endpoint description -type Endpoint struct { - // True if the endpoint cannot be resolved for this partition/region/service - Unresolveable aws.Ternary - - Hostname string - Protocols []string - - CredentialScope CredentialScope - - SignatureVersions []string - - // Indicates that this endpoint is deprecated. - Deprecated aws.Ternary -} - -// IsZero returns whether the endpoint structure is an empty (zero) value. -func (e Endpoint) IsZero() bool { - switch { - case e.Unresolveable != aws.UnknownTernary: - return false - case len(e.Hostname) != 0: - return false - case len(e.Protocols) != 0: - return false - case e.CredentialScope != (CredentialScope{}): - return false - case len(e.SignatureVersions) != 0: - return false - } - return true -} - -func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { - var merged Endpoint - merged.mergeIn(def) - merged.mergeIn(e) - e = merged - - if e.IsZero() { - return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) - } - - var u string - if e.Unresolveable != aws.TrueTernary { - // Only attempt to resolve the endpoint if it can be resolved. - hostname := strings.Replace(e.Hostname, "{region}", region, 1) - - scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) - u = scheme + "://" + hostname - } - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - signingName := e.CredentialScope.Service - - if e.Deprecated == aws.TrueTernary && options.LogDeprecated { - options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) - } - - return aws.Endpoint{ - URL: u, - PartitionID: partition, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - }, nil -} - -func (e *Endpoint) mergeIn(other Endpoint) { - if other.Unresolveable != aws.UnknownTernary { - e.Unresolveable = other.Unresolveable - } - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if other.Deprecated != aws.UnknownTernary { - e.Deprecated = other.Deprecated - } -} - -func getEndpointScheme(protocols []string, disableHTTPS bool) string { - if disableHTTPS { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go deleted file mode 100644 index e7b4e1cd18cf..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package endpoints - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "2.7.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md deleted file mode 100644 index f729db535b72..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ /dev/null @@ -1,283 +0,0 @@ -# v1.8.3 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 - -# v1.8.2 (2025-01-24) - -* **Bug Fix**: Refactor filepath.Walk to filepath.WalkDir - -# v1.8.1 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. - -# v1.8.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# v1.7.3 (2024-01-22) - -* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. - -# v1.7.2 (2023-12-08) - -* **Bug Fix**: Correct loading of [services *] sections into shared config. - -# v1.7.1 (2023-11-16) - -* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it. - -# v1.7.0 (2023-11-13) - -* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. - -# v1.6.0 (2023-11-09.2) - -* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored - -# v1.5.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2023-11-07) - -* **Bug Fix**: Fix subproperty performance regression - -# v1.5.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.45 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.44 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.43 (2023-09-22) - -* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. -* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. - -# v1.3.42 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.41 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.40 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.39 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.38 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.37 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.36 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.35 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.34 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.33 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.32 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.31 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.30 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.29 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.28 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.27 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.26 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.25 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.24 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.23 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.22 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.21 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2022-05-17) - -* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2022-03-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2022-02-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2022-01-28) - -* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. - -# v1.3.4 (2022-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2022-01-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2021-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-08-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-07-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-07-01) - -* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. - -# v1.0.1 (2021-06-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2021-05-20) - -* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go deleted file mode 100644 index 0f278d55e6c4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package ini - -import "fmt" - -// UnableToReadFile is an error indicating that a ini file could not be read -type UnableToReadFile struct { - Err error -} - -// Error returns an error message and the underlying error message if present -func (e *UnableToReadFile) Error() string { - base := "unable to read file" - if e.Err == nil { - return base - } - return fmt.Sprintf("%s: %v", base, e.Err) -} - -// Unwrap returns the underlying error -func (e *UnableToReadFile) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go deleted file mode 100644 index 00df0e3cb9bc..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ini - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.8.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go deleted file mode 100644 index cefcce91e76c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package ini implements parsing of the AWS shared config file. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -package ini - -import ( - "fmt" - "io" - "os" - "strings" -) - -// OpenFile parses shared config from the given file path. -func OpenFile(path string) (sections Sections, err error) { - f, oerr := os.Open(path) - if oerr != nil { - return Sections{}, &UnableToReadFile{Err: oerr} - } - - defer func() { - closeErr := f.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) - } - }() - - return Parse(f, path) -} - -// Parse parses shared config from the given reader. -func Parse(r io.Reader, path string) (Sections, error) { - contents, err := io.ReadAll(r) - if err != nil { - return Sections{}, fmt.Errorf("read all: %v", err) - } - - lines := strings.Split(string(contents), "\n") - tokens, err := tokenize(lines) - if err != nil { - return Sections{}, fmt.Errorf("tokenize: %v", err) - } - - return parse(tokens, path), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go deleted file mode 100644 index 2422d90461be..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go +++ /dev/null @@ -1,109 +0,0 @@ -package ini - -import ( - "fmt" - "strings" -) - -func parse(tokens []lineToken, path string) Sections { - parser := &parser{ - path: path, - sections: NewSections(), - } - parser.parse(tokens) - return parser.sections -} - -type parser struct { - csection, ckey string // current state - path string // source file path - sections Sections // parse result -} - -func (p *parser) parse(tokens []lineToken) { - for _, otok := range tokens { - switch tok := otok.(type) { - case *lineTokenProfile: - p.handleProfile(tok) - case *lineTokenProperty: - p.handleProperty(tok) - case *lineTokenSubProperty: - p.handleSubProperty(tok) - case *lineTokenContinuation: - p.handleContinuation(tok) - } - } -} - -func (p *parser) handleProfile(tok *lineTokenProfile) { - name := tok.Name - if tok.Type != "" { - name = fmt.Sprintf("%s %s", tok.Type, tok.Name) - } - p.ckey = "" - p.csection = name - if _, ok := p.sections.container[name]; !ok { - p.sections.container[name] = NewSection(name) - } -} - -func (p *parser) handleProperty(tok *lineTokenProperty) { - if p.csection == "" { - return // LEGACY: don't error on "global" properties - } - - p.ckey = tok.Key - if _, ok := p.sections.container[p.csection].values[tok.Key]; ok { - section := p.sections.container[p.csection] - section.Logs = append(p.sections.container[p.csection].Logs, - fmt.Sprintf( - "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n", - p.csection, tok.Key, tok.Key, p.path, - ), - ) - p.sections.container[p.csection] = section - } - - p.sections.container[p.csection].values[tok.Key] = Value{ - str: tok.Value, - } - p.sections.container[p.csection].SourceFile[tok.Key] = p.path -} - -func (p *parser) handleSubProperty(tok *lineTokenSubProperty) { - if p.csection == "" { - return // LEGACY: don't error on "global" properties - } - - if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" { - // This is an "orphaned" subproperty, either because it's at - // the beginning of a section or because the last property's - // value isn't empty. Either way we're lenient here and - // "promote" this to a normal property. - p.handleProperty(&lineTokenProperty{ - Key: tok.Key, - Value: strings.TrimSpace(trimPropertyComment(tok.Value)), - }) - return - } - - if p.sections.container[p.csection].values[p.ckey].mp == nil { - p.sections.container[p.csection].values[p.ckey] = Value{ - mp: map[string]string{}, - } - } - p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value -} - -func (p *parser) handleContinuation(tok *lineTokenContinuation) { - if p.ckey == "" { - return - } - - value, _ := p.sections.container[p.csection].values[p.ckey] - if value.str != "" && value.mp == nil { - value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value) - } - - p.sections.container[p.csection].values[p.ckey] = value -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go deleted file mode 100644 index dd89848e6961..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go +++ /dev/null @@ -1,157 +0,0 @@ -package ini - -import ( - "sort" -) - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// NewSections returns empty ini Sections -func NewSections() Sections { - return Sections{ - container: make(map[string]Section, 0), - } -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// HasSection denotes if Sections consist of a section with -// provided name. -func (t Sections) HasSection(p string) bool { - _, ok := t.container[p] - return ok -} - -// SetSection sets a section value for provided section name. -func (t Sections) SetSection(p string, v Section) Sections { - t.container[p] = v - return t -} - -// DeleteSection deletes a section entry/value for provided section name./ -func (t Sections) DeleteSection(p string) { - delete(t.container, p) -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - // Name is the Section profile name - Name string - - // values are the values within parsed profile - values values - - // Errors is the list of errors - Errors []error - - // Logs is the list of logs - Logs []string - - // SourceFile is the INI Source file from where this section - // was retrieved. They key is the property, value is the - // source file the property was retrieved from. - SourceFile map[string]string -} - -// NewSection returns an initialize section for the name -func NewSection(name string) Section { - return Section{ - Name: name, - values: values{}, - SourceFile: map[string]string{}, - } -} - -// List will return a list of all -// services in values -func (t Section) List() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// UpdateSourceFile updates source file for a property to provided filepath. -func (t Section) UpdateSourceFile(property string, filepath string) { - t.SourceFile[property] = filepath -} - -// UpdateValue updates value for a provided key with provided value -func (t Section) UpdateValue(k string, v Value) error { - t.values[k] = v - return nil -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) (bool, bool) { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) (int64, bool) { - return t.values[k].IntValue() -} - -// Map returns a map value at k -func (t Section) Map(k string) map[string]string { - return t.values[k].MapValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) (float64, bool) { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go deleted file mode 100644 index ed77d0835179..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go +++ /dev/null @@ -1,89 +0,0 @@ -package ini - -import ( - "strings" -) - -func trimProfileComment(s string) string { - r, _, _ := strings.Cut(s, "#") - r, _, _ = strings.Cut(r, ";") - return r -} - -func trimPropertyComment(s string) string { - r, _, _ := strings.Cut(s, " #") - r, _, _ = strings.Cut(r, " ;") - r, _, _ = strings.Cut(r, "\t#") - r, _, _ = strings.Cut(r, "\t;") - return r -} - -// assumes no surrounding comment -func splitProperty(s string) (string, string, bool) { - equalsi := strings.Index(s, "=") - coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment - sep := "=" - if equalsi == -1 || coloni != -1 && coloni < equalsi { - sep = ":" - } - - k, v, ok := strings.Cut(s, sep) - if !ok { - return "", "", false - } - return strings.TrimSpace(k), strings.TrimSpace(v), true -} - -// assumes no surrounding comment, whitespace, or profile brackets -func splitProfile(s string) (string, string) { - var first int - for i, r := range s { - if isLineSpace(r) { - if first == 0 { - first = i - } - } else { - if first != 0 { - return s[:first], s[i:] - } - } - } - if first == 0 { - return "", s // type component is effectively blank - } - return "", "" -} - -func isLineSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func unquote(s string) string { - if isSingleQuoted(s) || isDoubleQuoted(s) { - return s[1 : len(s)-1] - } - return s -} - -// applies various legacy conversions to property values: -// - remote wrapping single/doublequotes -func legacyStrconv(s string) string { - s = unquote(s) - return s -} - -func isSingleQuoted(s string) bool { - return hasAffixes(s, "'", "'") -} - -func isDoubleQuoted(s string) bool { - return hasAffixes(s, `"`, `"`) -} - -func isBracketed(s string) bool { - return hasAffixes(s, "[", "]") -} - -func hasAffixes(s, left, right string) bool { - return strings.HasPrefix(s, left) && strings.HasSuffix(s, right) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go deleted file mode 100644 index 6e9a03744e0e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go +++ /dev/null @@ -1,32 +0,0 @@ -package ini - -type lineToken interface { - isLineToken() -} - -type lineTokenProfile struct { - Type string - Name string -} - -func (*lineTokenProfile) isLineToken() {} - -type lineTokenProperty struct { - Key string - Value string -} - -func (*lineTokenProperty) isLineToken() {} - -type lineTokenContinuation struct { - Value string -} - -func (*lineTokenContinuation) isLineToken() {} - -type lineTokenSubProperty struct { - Key string - Value string -} - -func (*lineTokenSubProperty) isLineToken() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go deleted file mode 100644 index 89a7736841ee..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go +++ /dev/null @@ -1,92 +0,0 @@ -package ini - -import ( - "strings" -) - -func tokenize(lines []string) ([]lineToken, error) { - tokens := make([]lineToken, 0, len(lines)) - for _, line := range lines { - if len(strings.TrimSpace(line)) == 0 || isLineComment(line) { - continue - } - - if tok := asProfile(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asProperty(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asSubProperty(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asContinuation(line); tok != nil { - tokens = append(tokens, tok) - } // unrecognized tokens are effectively ignored - } - return tokens, nil -} - -func isLineComment(line string) bool { - trimmed := strings.TrimLeft(line, " \t") - return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";") -} - -func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment" - trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]" - if !isBracketed(trimmed) { - return nil - } - trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ") - trimmed = strings.TrimSpace(trimmed) // "type name" / "name" - typ, name := splitProfile(trimmed) - return &lineTokenProfile{ - Type: typ, - Name: name, - } -} - -func asProperty(line string) *lineTokenProperty { - if isLineSpace(rune(line[0])) { - return nil - } - - trimmed := trimPropertyComment(line) - trimmed = strings.TrimRight(trimmed, " \t") - k, v, ok := splitProperty(trimmed) - if !ok { - return nil - } - - return &lineTokenProperty{ - Key: strings.ToLower(k), // LEGACY: normalize key case - Value: legacyStrconv(v), // LEGACY: see func docs - } -} - -func asSubProperty(line string) *lineTokenSubProperty { - if !isLineSpace(rune(line[0])) { - return nil - } - - // comments on sub-properties are included in the value - trimmed := strings.TrimLeft(line, " \t") - k, v, ok := splitProperty(trimmed) - if !ok { - return nil - } - - return &lineTokenSubProperty{ // same LEGACY constraints as in normal property - Key: strings.ToLower(k), - Value: legacyStrconv(v), - } -} - -func asContinuation(line string) *lineTokenContinuation { - if !isLineSpace(rune(line[0])) { - return nil - } - - // includes comments like sub-properties - trimmed := strings.TrimLeft(line, " \t") - return &lineTokenContinuation{ - Value: trimmed, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go deleted file mode 100644 index e3706b3c31b1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go +++ /dev/null @@ -1,93 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" -) - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case StringType: - return "STRING" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - StringType - QuotedStringType -) - -// Value is a union container -type Value struct { - Type ValueType - - str string - mp map[string]string -} - -// NewStringValue returns a Value type generated using a string input. -func NewStringValue(str string) (Value, error) { - return Value{str: str}, nil -} - -func (v Value) String() string { - switch v.Type { - case StringType: - return fmt.Sprintf("string: %s", string(v.str)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.str)) - default: - return "union not set" - } -} - -// MapValue returns a map value for sub properties -func (v Value) MapValue() map[string]string { - return v.mp -} - -// IntValue returns an integer value -func (v Value) IntValue() (int64, bool) { - i, err := strconv.ParseInt(string(v.str), 0, 64) - if err != nil { - return 0, false - } - return i, true -} - -// FloatValue returns a float value -func (v Value) FloatValue() (float64, bool) { - f, err := strconv.ParseFloat(string(v.str), 64) - if err != nil { - return 0, false - } - return f, true -} - -// BoolValue returns a bool value -func (v Value) BoolValue() (bool, bool) { - // we don't use ParseBool as it recognizes more than what we've - // historically supported - if strings.EqualFold(v.str, "true") { - return true, true - } else if strings.EqualFold(v.str, "false") { - return false, true - } - return false, false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - return v.str -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go deleted file mode 100644 index 8e24a3f0a470..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go +++ /dev/null @@ -1,42 +0,0 @@ -package middleware - -import ( - "context" - "sync/atomic" - "time" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go/middleware" -) - -// AddTimeOffsetMiddleware sets a value representing clock skew on the request context. -// This can be read by other operations (such as signing) to correct the date value they send -// on the request -type AddTimeOffsetMiddleware struct { - Offset *atomic.Int64 -} - -// ID the identifier for AddTimeOffsetMiddleware -func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" } - -// HandleBuild sets a value for attemptSkew on the request context if one is set on the client. -func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - if m.Offset != nil { - offset := time.Duration(m.Offset.Load()) - ctx = internalcontext.SetAttemptSkewContext(ctx, offset) - } - return next.HandleBuild(ctx, in) -} - -// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer -// held by AddTimeOffsetMiddleware -func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 { - m.Offset.Store(v.Nanoseconds()) - } - return next.HandleDeserialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go deleted file mode 100644 index c8484dcd7592..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go +++ /dev/null @@ -1,33 +0,0 @@ -package rand - -import ( - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func init() { - Reader = rand.Reader -} - -// Reader provides a random reader that can reset during testing. -var Reader io.Reader - -var floatMaxBigInt = big.NewInt(1 << 53) - -// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0). -func Float64(reader io.Reader) (float64, error) { - bi, err := rand.Int(reader, floatMaxBigInt) - if err != nil { - return 0, fmt.Errorf("failed to read random value, %v", err) - } - - return float64(bi.Int64()) / (1 << 53), nil -} - -// CryptoRandFloat64 returns a random float64 obtained from the crypto rand -// source. -func CryptoRandFloat64() (float64, error) { - return Float64(Reader) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go deleted file mode 100644 index 2b42cbe6421a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go +++ /dev/null @@ -1,9 +0,0 @@ -package sdk - -// Invalidator provides access to a type's invalidate method to make it -// invalidate it cache. -// -// e.g aws.SafeCredentialsProvider's Invalidate method. -type Invalidator interface { - Invalidate() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go deleted file mode 100644 index 8e8dabad5488..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go +++ /dev/null @@ -1,74 +0,0 @@ -package sdk - -import ( - "context" - "time" -) - -func init() { - NowTime = time.Now - Sleep = time.Sleep - SleepWithContext = sleepWithContext -} - -// NowTime is a value for getting the current time. This value can be overridden -// for testing mocking out current time. -var NowTime func() time.Time - -// Sleep is a value for sleeping for a duration. This value can be overridden -// for testing and mocking out sleep duration. -var Sleep func(time.Duration) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// This value can be overridden for testing and mocking out sleep duration. -var SleepWithContext func(context.Context, time.Duration) error - -// sleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the -// Context's error will be returned. -func sleepWithContext(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} - -// noOpSleepWithContext does nothing, returns immediately. -func noOpSleepWithContext(context.Context, time.Duration) error { - return nil -} - -func noOpSleep(time.Duration) {} - -// TestingUseNopSleep is a utility for disabling sleep across the SDK for -// testing. -func TestingUseNopSleep() func() { - SleepWithContext = noOpSleepWithContext - Sleep = noOpSleep - - return func() { - SleepWithContext = sleepWithContext - Sleep = time.Sleep - } -} - -// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time -// for testing purposes. -func TestingUseReferenceTime(referenceTime time.Time) func() { - NowTime = func() time.Time { - return referenceTime - } - return func() { - NowTime = time.Now - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go deleted file mode 100644 index 6c443988bbc9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go deleted file mode 100644 index c96b717e08a2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,47 +0,0 @@ -package shareddefaults - -import ( - "os" - "os/user" - "path/filepath" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - // Ignore errors since we only care about Windows and *nix. - home, _ := os.UserHomeDir() - - if len(home) > 0 { - return home - } - - currUser, _ := user.Current() - if currUser != nil { - home = currUser.HomeDir - } - - return home -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go deleted file mode 100644 index d008ae27cb31..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE deleted file mode 100644 index fe6a62006a52..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go deleted file mode 100644 index cb70616e8027..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package singleflight provides a duplicate function call suppression -// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight -// package. The package is forked, because the package a part of the unstable -// and unversioned golang.org/x/sync module. -// -// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight -package singleflight diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go deleted file mode 100644 index e8a1b17d5640..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package singleflight - -import ( - "bytes" - "errors" - "fmt" - "runtime" - "runtime/debug" - "sync" -) - -// errGoexit indicates the runtime.Goexit was called in -// the user given function. -var errGoexit = errors.New("runtime.Goexit was called") - -// A panicError is an arbitrary value recovered from a panic -// with the stack trace during the execution of given function. -type panicError struct { - value interface{} - stack []byte -} - -// Error implements error interface. -func (p *panicError) Error() string { - return fmt.Sprintf("%v\n\n%s", p.value, p.stack) -} - -func newPanicError(v interface{}) error { - stack := debug.Stack() - - // The first line of the stack trace is of the form "goroutine N [status]:" - // but by the time the panic reaches Do the goroutine may no longer exist - // and its status will have changed. Trim out the misleading line. - if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { - stack = stack[line+1:] - } - return &panicError{value: v, stack: stack} -} - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - - if e, ok := c.err.(*panicError); ok { - panic(e) - } else if c.err == errGoexit { - runtime.Goexit() - } - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -// -// The returned channel will not be closed. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - normalReturn := false - recovered := false - - // use double-defer to distinguish panic from runtime.Goexit, - // more details see https://golang.org/cl/134395 - defer func() { - // the given function invoked runtime.Goexit - if !normalReturn && !recovered { - c.err = errGoexit - } - - c.wg.Done() - g.mu.Lock() - defer g.mu.Unlock() - if !c.forgotten { - delete(g.m, key) - } - - if e, ok := c.err.(*panicError); ok { - // In order to prevent the waiting channels from being blocked forever, - // needs to ensure that this panic cannot be recovered. - if len(c.chans) > 0 { - go panic(e) - select {} // Keep this goroutine around so that it will appear in the crash dump. - } else { - panic(e) - } - } else if c.err == errGoexit { - // Already in the process of goexit, no need to call again - } else { - // Normal return - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - } - }() - - func() { - defer func() { - if !normalReturn { - // Ideally, we would wait to take a stack trace until we've determined - // whether this is a panic or a runtime.Goexit. - // - // Unfortunately, the only way we can distinguish the two is to see - // whether the recover stopped the goroutine from terminating, and by - // the time we know that, the part of the stack trace relevant to the - // panic has been discarded. - if r := recover(); r != nil { - c.err = newPanicError(r) - } - } - }() - - c.val, c.err = fn() - normalReturn = true - }() - - if !normalReturn { - recovered = true - } -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go deleted file mode 100644 index 5d69db5f2497..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go +++ /dev/null @@ -1,13 +0,0 @@ -package timeconv - -import "time" - -// FloatSecondsDur converts a fractional seconds to duration. -func FloatSecondsDur(v float64) time.Duration { - return time.Duration(v * float64(time.Second)) -} - -// DurSecondsFloat converts a duration into fractional seconds. -func DurSecondsFloat(d time.Duration) float64 { - return float64(d) / float64(time.Second) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md deleted file mode 100644 index 637b81ddd63f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ /dev/null @@ -1,372 +0,0 @@ -# v1.4.4 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.37 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.36 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.35 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.34 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.33 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.32 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.31 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.30 (2025-01-30) - -* **Bug Fix**: Do not sign Transfer-Encoding header in Sigv4[a]. Fixes a signer mismatch issue with S3 Accelerate. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.29 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.3.28 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.27 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.26 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.25 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.24 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.23 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.22 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.21 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.6 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.5 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.4 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.3 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.2 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.28 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.27 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.26 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.25 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.24 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.23 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.22 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.21 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.20 (2023-02-14) - -* No change notes available for this release. - -# v1.0.19 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.18 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.17 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.16 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.15 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.14 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.13 (2022-09-14) - -* **Bug Fix**: Fixes an issues where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.12 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.11 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.10 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.9 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.8 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.7 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.6 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.5 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.4 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.3 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.2 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.1 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2022-04-07) - -* **Release**: New internal v4a signing module location. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go deleted file mode 100644 index 3ae3a019e628..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go +++ /dev/null @@ -1,141 +0,0 @@ -package v4a - -import ( - "context" - "crypto/ecdsa" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// Credentials is Context, ECDSA, and Optional Session Token that can be used -// to sign requests using SigV4a -type Credentials struct { - Context string - PrivateKey *ecdsa.PrivateKey - SessionToken string - - // Time the credentials will expire. - CanExpire bool - Expires time.Time -} - -// Expired returns if the credentials have expired. -func (v Credentials) Expired() bool { - if v.CanExpire { - return !v.Expires.After(sdk.NowTime()) - } - - return false -} - -// HasKeys returns if the credentials keys are set. -func (v Credentials) HasKeys() bool { - return len(v.Context) > 0 && v.PrivateKey != nil -} - -// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials -// to a ECDSA PrivateKey for signing with SiV4a -type SymmetricCredentialAdaptor struct { - SymmetricProvider aws.CredentialsProvider - - asymmetric atomic.Value - m sync.Mutex -} - -// Retrieve retrieves symmetric credentials from the underlying provider. -func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) { - symCreds, err := s.retrieveFromSymmetricProvider(ctx) - if err != nil { - return aws.Credentials{}, err - } - - if asymCreds := s.getCreds(); asymCreds == nil { - return symCreds, nil - } - - s.m.Lock() - defer s.m.Unlock() - - asymCreds := s.getCreds() - if asymCreds == nil { - return symCreds, nil - } - - // if the context does not match the access key id clear it - if asymCreds.Context != symCreds.AccessKeyID { - s.asymmetric.Store((*Credentials)(nil)) - } - - return symCreds, nil -} - -// RetrievePrivateKey returns credentials suitable for SigV4a signing -func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) { - if asymCreds := s.getCreds(); asymCreds != nil { - return *asymCreds, nil - } - - s.m.Lock() - defer s.m.Unlock() - - if asymCreds := s.getCreds(); asymCreds != nil { - return *asymCreds, nil - } - - symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx) - if err != nil { - return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err) - } - - privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey) - if err != nil { - return Credentials{}, fmt.Errorf("failed to derive assymetric key from credentials") - } - - creds := Credentials{ - Context: symmetricCreds.AccessKeyID, - PrivateKey: privateKey, - SessionToken: symmetricCreds.SessionToken, - CanExpire: symmetricCreds.CanExpire, - Expires: symmetricCreds.Expires, - } - - s.asymmetric.Store(&creds) - - return creds, nil -} - -func (s *SymmetricCredentialAdaptor) getCreds() *Credentials { - v := s.asymmetric.Load() - - if v == nil { - return nil - } - - c := v.(*Credentials) - if c != nil && c.HasKeys() && !c.Expired() { - return c - } - - return nil -} - -func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) { - credentials, err := s.SymmetricProvider.Retrieve(ctx) - if err != nil { - return aws.Credentials{}, err - } - - return credentials, nil -} - -// CredentialsProvider is the interface for a provider to retrieve credentials -// to sign requests with. -type CredentialsProvider interface { - RetrievePrivateKey(context.Context) (Credentials, error) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go deleted file mode 100644 index 380d17427146..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package v4a - -import "fmt" - -// SigningError indicates an error condition occurred while performing SigV4a signing -type SigningError struct { - Err error -} - -func (e *SigningError) Error() string { - return fmt.Sprintf("failed to sign request: %v", e.Err) -} - -// Unwrap returns the underlying error cause -func (e *SigningError) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go deleted file mode 100644 index 0eb6d8d74d13..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package v4a - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.4.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go deleted file mode 100644 index 1d0f25f8c203..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go +++ /dev/null @@ -1,30 +0,0 @@ -package crypto - -import "fmt" - -// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison -// if the two byte slices assuming they represent a big-endian number. -// -// error if len(x) != len(y) -// -1 if x < y -// 0 if x == y -// +1 if x > y -func ConstantTimeByteCompare(x, y []byte) (int, error) { - if len(x) != len(y) { - return 0, fmt.Errorf("slice lengths do not match") - } - - xLarger, yLarger := 0, 0 - - for i := 0; i < len(x); i++ { - xByte, yByte := int(x[i]), int(y[i]) - - x := ((yByte - xByte) >> 8) & 1 - y := ((xByte - yByte) >> 8) & 1 - - xLarger |= x &^ yLarger - yLarger |= y &^ xLarger - } - - return xLarger - yLarger, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go deleted file mode 100644 index 758c73fcb3e6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go +++ /dev/null @@ -1,113 +0,0 @@ -package crypto - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/hmac" - "encoding/asn1" - "encoding/binary" - "fmt" - "hash" - "math" - "math/big" -) - -type ecdsaSignature struct { - R, S *big.Int -} - -// ECDSAKey takes the given elliptic curve, and private key (d) byte slice -// and returns the private ECDSA key. -func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey { - return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d)) -} - -// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the -// private and public keypair -func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey { - pX, pY := curve.ScalarBaseMult(d.Bytes()) - - privKey := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: pX, - Y: pY, - }, - D: d, - } - - return privKey -} - -// ECDSAPublicKey takes the provide curve and (x, y) coordinates and returns -// *ecdsa.PublicKey. Returns an error if the given points are not on the curve. -func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) { - xPoint := (&big.Int{}).SetBytes(x) - yPoint := (&big.Int{}).SetBytes(y) - - if !curve.IsOnCurve(xPoint, yPoint) { - return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String()) - } - - return &ecdsa.PublicKey{ - Curve: curve, - X: xPoint, - Y: yPoint, - }, nil -} - -// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns -// whether the given signature is valid. -func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) { - var ecdsaSignature ecdsaSignature - - _, err := asn1.Unmarshal(signature, &ecdsaSignature) - if err != nil { - return false, err - } - - return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil -} - -// HMACKeyDerivation provides an implementation of a NIST-800-108 of a KDF (Key Derivation Function) in Counter Mode. -// For the purposes of this implantation HMAC is used as the PRF (Pseudorandom function), where the value of -// `r` is defined as a 4 byte counter. -func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) { - // verify that we won't overflow the counter - n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size()))) - if n > 0x7FFFFFFF { - return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen) - } - - // verify the requested bit length is not larger then the length encoding size - if int64(bitLen) > 0x7FFFFFFF { - return nil, fmt.Errorf("bitLen is greater than 32-bits") - } - - fixedInput := bytes.NewBuffer(nil) - fixedInput.Write(label) - fixedInput.WriteByte(0x00) - fixedInput.Write(context) - if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil { - return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err) - } - - var output []byte - - h := hmac.New(hash, key) - - for i := int64(1); i <= n; i++ { - h.Reset() - if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil { - return nil, err - } - _, err := h.Write(fixedInput.Bytes()) - if err != nil { - return nil, err - } - output = append(output, h.Sum(nil)...) - } - - return output[:bitLen/8], nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go deleted file mode 100644 index 89a76e2eaab4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go +++ /dev/null @@ -1,36 +0,0 @@ -package v4 - -const ( - // EmptyStringSHA256 is the hex encoded sha256 value of an empty string - EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` - - // UnsignedPayload indicates that the request payload body is unsigned - UnsignedPayload = "UNSIGNED-PAYLOAD" - - // AmzAlgorithmKey indicates the signing algorithm - AmzAlgorithmKey = "X-Amz-Algorithm" - - // AmzSecurityTokenKey indicates the security token to be used with temporary credentials - AmzSecurityTokenKey = "X-Amz-Security-Token" - - // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' - AmzDateKey = "X-Amz-Date" - - // AmzCredentialKey is the access key ID and credential scope - AmzCredentialKey = "X-Amz-Credential" - - // AmzSignedHeadersKey is the set of headers signed for the request - AmzSignedHeadersKey = "X-Amz-SignedHeaders" - - // AmzSignatureKey is the query parameter to store the SigV4 signature - AmzSignatureKey = "X-Amz-Signature" - - // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter - TimeFormat = "20060102T150405Z" - - // ShortTimeFormat is the shorten time format used in the credential scope - ShortTimeFormat = "20060102" - - // ContentSHAKey is the SHA256 of request body - ContentSHAKey = "X-Amz-Content-Sha256" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go deleted file mode 100644 index a15177e8f3f1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings" -) - -// Rules houses a set of Rule needed for validation of a -// string value -type Rules []Rule - -// Rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that Rule -type Rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r Rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// MapRule generic Rule for maps -type MapRule map[string]struct{} - -// IsValid for the map Rule satisfies whether it exists in the map -func (m MapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// AllowList is a generic Rule for whitelisting -type AllowList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (w AllowList) IsValid(value string) bool { - return w.Rule.IsValid(value) -} - -// DenyList is a generic Rule for blacklisting -type DenyList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (b DenyList) IsValid(value string) bool { - return !b.Rule.IsValid(value) -} - -// Patterns is a list of strings to match against -type Patterns []string - -// IsValid for Patterns checks each pattern and returns if a match has -// been found -func (p Patterns) IsValid(value string) bool { - for _, pattern := range p { - if sdkstrings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// InclusiveRules rules allow for rules to depend on one another -type InclusiveRules []Rule - -// IsValid will return true if all rules are true -func (r InclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go deleted file mode 100644 index 688f834742c5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go +++ /dev/null @@ -1,68 +0,0 @@ -package v4 - -// IgnoredHeaders is a list of headers that are ignored during signing -var IgnoredHeaders = Rules{ - DenyList{ - MapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - "Transfer-Encoding": struct{}{}, - }, - }, -} - -// RequiredSignedHeaders is a whitelist for Build canonical headers. -var RequiredSignedHeaders = Rules{ - AllowList{ - MapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - "X-Amz-Tagging": struct{}{}, - }, - }, - Patterns{"X-Amz-Meta-"}, -} - -// AllowedQueryHoisting is a whitelist for Build query headers. The boolean value -// represents whether or not it is a pattern. -var AllowedQueryHoisting = InclusiveRules{ - DenyList{RequiredSignedHeaders}, - Patterns{"X-Amz-"}, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go deleted file mode 100644 index e7fa7a1b1e60..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" -) - -// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. -func HMACSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go deleted file mode 100644 index bf93659a43f3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go +++ /dev/null @@ -1,75 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go deleted file mode 100644 index 1de06a765d1b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go +++ /dev/null @@ -1,36 +0,0 @@ -package v4 - -import "time" - -// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. -type SigningTime struct { - time.Time - timeFormat string - shortTimeFormat string -} - -// NewSigningTime creates a new SigningTime given a time.Time -func NewSigningTime(t time.Time) SigningTime { - return SigningTime{ - Time: t, - } -} - -// TimeFormat provides a time formatted in the X-Amz-Date format. -func (m *SigningTime) TimeFormat() string { - return m.format(&m.timeFormat, TimeFormat) -} - -// ShortTimeFormat provides a time formatted of 20060102. -func (m *SigningTime) ShortTimeFormat() string { - return m.format(&m.shortTimeFormat, ShortTimeFormat) -} - -func (m *SigningTime) format(target *string, format string) string { - if len(*target) > 0 { - return *target - } - v := m.Time.Format(format) - *target = v - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go deleted file mode 100644 index 741019b5f9da..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go +++ /dev/null @@ -1,64 +0,0 @@ -package v4 - -import ( - "net/url" - "strings" -) - -const doubleSpace = " " - -// StripExcessSpaces will rewrite the passed in slice's string values to not -// contain muliple side-by-side spaces. -func StripExcessSpaces(str string) string { - var j, k, l, m, spaces int - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - return str - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - return string(buf[:m]) -} - -// GetURIPath returns the escaped URI component from the provided URL -func GetURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = u.EscapedPath() - } - - if len(uri) == 0 { - uri = "/" - } - - return uri -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go deleted file mode 100644 index 64b8b4e330e9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go +++ /dev/null @@ -1,118 +0,0 @@ -package v4a - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "time" -) - -// HTTPSigner is SigV4a HTTP signer implementation -type HTTPSigner interface { - SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optfns ...func(*SignerOptions)) error -} - -// SignHTTPRequestMiddlewareOptions is the middleware options for constructing a SignHTTPRequestMiddleware. -type SignHTTPRequestMiddlewareOptions struct { - Credentials CredentialsProvider - Signer HTTPSigner - LogSigning bool -} - -// SignHTTPRequestMiddleware is a middleware for signing an HTTP request using SigV4a. -type SignHTTPRequestMiddleware struct { - credentials CredentialsProvider - signer HTTPSigner - logSigning bool -} - -// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given SignHTTPRequestMiddlewareOptions. -func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { - return &SignHTTPRequestMiddleware{ - credentials: options.Credentials, - signer: options.Signer, - logSigning: options.LogSigning, - } -} - -// ID the middleware identifier. -func (s *SignHTTPRequestMiddleware) ID() string { - return "Signing" -} - -// HandleFinalize signs an HTTP request using SigV4a. -func (s *SignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if !hasCredentialProvider(s.credentials) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected request middleware type %T", in.Request) - } - - signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) - payloadHash := v4.GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} - } - - credentials, err := s.credentials.RetrievePrivateKey(ctx) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} - } - - signerOptions := []func(o *SignerOptions){ - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }, - } - - // existing DisableURIPathEscaping is equivalent in purpose - // to authentication scheme property DisableDoubleEncoding - disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) - if overridden { - signerOptions = append(signerOptions, func(o *SignerOptions) { - o.DisableURIPathEscaping = disableDoubleEncoding - }) - } - - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), signerOptions...) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} - } - - return next.HandleFinalize(ctx, in) -} - -func hasCredentialProvider(p CredentialsProvider) bool { - if p == nil { - return false - } - - return true -} - -// RegisterSigningMiddleware registers the SigV4a signing middleware to the stack. If a signing middleware is already -// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the -// finalize step. -func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) { - const signedID = "Signing" - _, present := stack.Finalize.Get(signedID) - if present { - _, err = stack.Finalize.Swap(signedID, signingMiddleware) - } else { - err = stack.Finalize.Add(signingMiddleware, middleware.After) - } - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go deleted file mode 100644 index 951fc415d527..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go +++ /dev/null @@ -1,117 +0,0 @@ -package v4a - -import ( - "context" - "fmt" - "net/http" - "time" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - smithyHTTP "github.com/aws/smithy-go/transport/http" -) - -// HTTPPresigner is an interface to a SigV4a signer that can sign create a -// presigned URL for a HTTP requests. -type HTTPPresigner interface { - PresignHTTP( - ctx context.Context, credentials Credentials, r *http.Request, - payloadHash string, service string, regionSet []string, signingTime time.Time, - optFns ...func(*SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. -type PresignHTTPRequestMiddlewareOptions struct { - CredentialsProvider CredentialsProvider - Presigner HTTPPresigner - LogSigning bool -} - -// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a -// presigned URL for an HTTP request. -// -// Will short circuit the middleware stack and not forward onto the next -// Finalize handler. -type PresignHTTPRequestMiddleware struct { - credentialsProvider CredentialsProvider - presigner HTTPPresigner - logSigning bool -} - -// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware -// initialized with the presigner. -func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { - return &PresignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - presigner: options.Presigner, - logSigning: options.LogSigning, - } -} - -// ID provides the middleware ID. -func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 presign authentication scheme. -func (s *PresignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyHTTP.Request) - if !ok { - return out, metadata, &SigningError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - httpReq := req.Build(ctx) - if !hasCredentialProvider(s.credentialsProvider) { - out.Result = &v4.PresignedHTTPRequest{ - URL: httpReq.URL.String(), - Method: httpReq.Method, - SignedHeader: http.Header{}, - } - - return out, metadata, nil - } - - signingName := awsmiddleware.GetSigningName(ctx) - signingRegion := awsmiddleware.GetSigningRegion(ctx) - payloadHash := v4.GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{ - Err: fmt.Errorf("computed payload hash missing from context"), - } - } - - credentials, err := s.credentialsProvider.RetrievePrivateKey(ctx) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to retrieve credentials: %w", err), - } - } - - u, h, err := s.presigner.PresignHTTP(ctx, credentials, - httpReq, payloadHash, signingName, []string{signingRegion}, sdk.NowTime(), - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to sign http request, %w", err), - } - } - - out.Result = &v4.PresignedHTTPRequest{ - URL: u, - Method: httpReq.Method, - SignedHeader: h, - } - - return out, metadata, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go deleted file mode 100644 index af4f6abcfa73..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go +++ /dev/null @@ -1,92 +0,0 @@ -package v4a - -import ( - "context" - "fmt" - "time" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// CredentialsAdapter adapts v4a.Credentials to smithy auth.Identity. -type CredentialsAdapter struct { - Credentials Credentials -} - -var _ auth.Identity = (*CredentialsAdapter)(nil) - -// Expiration returns the time of expiration for the credentials. -func (v *CredentialsAdapter) Expiration() time.Time { - return v.Credentials.Expires -} - -// CredentialsProviderAdapter adapts v4a.CredentialsProvider to -// auth.IdentityResolver. -type CredentialsProviderAdapter struct { - Provider CredentialsProvider -} - -var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) - -// GetIdentity retrieves v4a credentials using the underlying provider. -func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - creds, err := v.Provider.RetrievePrivateKey(ctx) - if err != nil { - return nil, fmt.Errorf("get credentials: %w", err) - } - - return &CredentialsAdapter{Credentials: creds}, nil -} - -// SignerAdapter adapts v4a.HTTPSigner to smithy http.Signer. -type SignerAdapter struct { - Signer HTTPSigner - Logger logging.Logger - LogSigning bool -} - -var _ (smithyhttp.Signer) = (*SignerAdapter)(nil) - -// SignRequest signs the request with the provided identity. -func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { - ca, ok := identity.(*CredentialsAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - name, ok := smithyhttp.GetSigV4SigningName(&props) - if !ok { - return fmt.Errorf("sigv4a signing name is required") - } - - regions, ok := smithyhttp.GetSigV4ASigningRegions(&props) - if !ok { - return fmt.Errorf("sigv4a signing region is required") - } - - hash := v4.GetPayloadHash(ctx) - signingTime := sdk.NowTime() - if skew := internalcontext.GetAttemptSkewContext(ctx); skew != 0 { - signingTime.Add(skew) - } - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, signingTime, func(o *SignerOptions) { - o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) - - o.Logger = v.Logger - o.LogSigning = v.LogSigning - }) - if err != nil { - return fmt.Errorf("sign http: %w", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go deleted file mode 100644 index f1f6ecc37140..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go +++ /dev/null @@ -1,520 +0,0 @@ -package v4a - -import ( - "bytes" - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "fmt" - "hash" - "math/big" - "net/http" - "net/textproto" - "net/url" - "sort" - "strconv" - "strings" - "time" - - signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto" - v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/logging" -) - -const ( - // AmzRegionSetKey represents the region set header used for sigv4a - AmzRegionSetKey = "X-Amz-Region-Set" - amzAlgorithmKey = v4Internal.AmzAlgorithmKey - amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey - amzDateKey = v4Internal.AmzDateKey - amzCredentialKey = v4Internal.AmzCredentialKey - amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey - authorizationHeader = "Authorization" - - signingAlgorithm = "AWS4-ECDSA-P256-SHA256" - - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - - // EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string - EmptyStringSHA256 = v4Internal.EmptyStringSHA256 - - // Version of signing v4a - Version = "SigV4A" -) - -var ( - p256 elliptic.Curve - nMinusTwoP256 *big.Int - - one = new(big.Int).SetInt64(1) -) - -func init() { - // Ensure the elliptic curve parameters are initialized on package import rather then on first usage - p256 = elliptic.P256() - - nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes()) - nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2)) -} - -// SignerOptions is the SigV4a signing options for constructing a Signer. -type SignerOptions struct { - Logger logging.Logger - LogSigning bool - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool -} - -// Signer is a SigV4a HTTP signing implementation -type Signer struct { - options SignerOptions -} - -// NewSigner constructs a SigV4a Signer. -func NewSigner(optFns ...func(*SignerOptions)) *Signer { - options := SignerOptions{} - - for _, fn := range optFns { - fn(&options) - } - - return &Signer{options: options} -} - -// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given -// IAM AccessKey and SecretKey pair. -// -// Based on FIPS.186-4 Appendix B.4.2 -func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) { - params := p256.Params() - bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits - counter := 0x01 - - buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey) - kdfContext := bytes.NewBuffer(buffer) - - inputKey := append([]byte("AWS4A"), []byte(secretKey)...) - - d := new(big.Int) - for { - kdfContext.Reset() - kdfContext.WriteString(accessKey) - kdfContext.WriteByte(byte(counter)) - - key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes()) - if err != nil { - return nil, err - } - - // Check key first before calling SetBytes if key key is in fact a valid candidate. - // This ensures the byte slice is the correct length (32-bytes) to compare in constant-time - cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes()) - if err != nil { - return nil, err - } - if cmp == -1 { - d.SetBytes(key) - break - } - - counter++ - if counter > 0xFF { - return nil, fmt.Errorf("exhausted single byte external counter") - } - } - d = d.Add(d, one) - - priv := new(ecdsa.PrivateKey) - priv.PublicKey.Curve = p256 - priv.D = d - priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes()) - - return priv, nil -} - -type httpSigner struct { - Request *http.Request - ServiceName string - RegionSet []string - Time time.Time - Credentials Credentials - IsPreSign bool - - Logger logging.Logger - Debug bool - - // PayloadHash is the hex encoded SHA-256 hash of the request payload - // If len(PayloadHash) == 0 the signer will attempt to send the request - // as an unsigned payload. Note: Unsigned payloads only work for a subset of services. - PayloadHash string - - DisableHeaderHoisting bool - DisableURIPathEscaping bool -} - -// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a. -// The passed in request will be modified in place. -func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error { - options := s.options - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r, - PayloadHash: payloadHash, - ServiceName: service, - RegionSet: regionSet, - Credentials: credentials, - Time: signingTime.UTC(), - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - } - - signedRequest, err := signer.Build() - if err != nil { - return err - } - - logHTTPSigningInfo(ctx, options, signedRequest) - - return nil -} - -// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a -// Returns the presigned URL along with the headers that were signed with the request. -// -// PresignHTTP will not set the expires time of the presigned request -// automatically. To specify the expire duration for a request add the -// "X-Amz-Expires" query parameter on the request with the value as the -// duration in seconds the presigned URL should be considered valid for. This -// parameter is not used by all AWS services, and is most notable used by -// Amazon S3 APIs. -func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) { - options := s.options - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r, - PayloadHash: payloadHash, - ServiceName: service, - RegionSet: regionSet, - Credentials: credentials, - Time: signingTime.UTC(), - IsPreSign: true, - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - } - - signedRequest, err := signer.Build() - if err != nil { - return "", nil, err - } - - logHTTPSigningInfo(ctx, options, signedRequest) - - signedHeaders = make(http.Header) - - // For the signed headers we canonicalize the header keys in the returned map. - // This avoids situations where can standard library double headers like host header. For example the standard - // library will set the Host header, even if it is present in lower-case form. - for k, v := range signedRequest.SignedHeaders { - key := textproto.CanonicalMIMEHeaderKey(k) - signedHeaders[key] = append(signedHeaders[key], v...) - } - - return signedRequest.Request.URL.String(), signedHeaders, nil -} - -func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { - amzDate := s.Time.Format(timeFormat) - - if s.IsPreSign { - query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) - query.Set(amzDateKey, amzDate) - query.Set(amzAlgorithmKey, signingAlgorithm) - if len(s.Credentials.SessionToken) > 0 { - query.Set(amzSecurityTokenKey, s.Credentials.SessionToken) - } - return - } - - headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) - headers.Set(amzDateKey, amzDate) - if len(s.Credentials.SessionToken) > 0 { - headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken) - } -} - -func (s *httpSigner) Build() (signedRequest, error) { - req := s.Request - - query := req.URL.Query() - headers := req.Header - - s.setRequiredSigningFields(headers, query) - - // Sort Each Query Key's Values - for key := range query { - sort.Strings(query[key]) - } - - v4Internal.SanitizeHostForHeader(req) - - credentialScope := s.buildCredentialScope() - credentialStr := s.Credentials.Context + "/" + credentialScope - if s.IsPreSign { - query.Set(amzCredentialKey, credentialStr) - } - - unsignedHeaders := headers - if s.IsPreSign && !s.DisableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders) - for k := range urlValues { - query[k] = urlValues[k] - } - } - - host := req.URL.Host - if len(req.Host) > 0 { - host = req.Host - } - - signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) - - if s.IsPreSign { - query.Set(amzSignedHeadersKey, signedHeadersStr) - } - - rawQuery := strings.Replace(query.Encode(), "+", "%20", -1) - - canonicalURI := v4Internal.GetURIPath(req.URL) - if !s.DisableURIPathEscaping { - canonicalURI = httpbinding.EscapePath(canonicalURI, false) - } - - canonicalString := s.buildCanonicalString( - req.Method, - canonicalURI, - rawQuery, - signedHeadersStr, - canonicalHeaderStr, - ) - - strToSign := s.buildStringToSign(credentialScope, canonicalString) - signingSignature, err := s.buildSignature(strToSign) - if err != nil { - return signedRequest{}, err - } - - if s.IsPreSign { - rawQuery += "&X-Amz-Signature=" + signingSignature - } else { - headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) - } - - req.URL.RawQuery = rawQuery - - return signedRequest{ - Request: req, - SignedHeaders: signedHeaders, - CanonicalString: canonicalString, - StringToSign: strToSign, - PreSigned: s.IsPreSign, - }, nil -} - -func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { - const credential = "Credential=" - const signedHeaders = "SignedHeaders=" - const signature = "Signature=" - const commaSpace = ", " - - var parts strings.Builder - parts.Grow(len(signingAlgorithm) + 1 + - len(credential) + len(credentialStr) + len(commaSpace) + - len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) + - len(signature) + len(signingSignature), - ) - parts.WriteString(signingAlgorithm) - parts.WriteRune(' ') - parts.WriteString(credential) - parts.WriteString(credentialStr) - parts.WriteString(commaSpace) - parts.WriteString(signedHeaders) - parts.WriteString(signedHeadersStr) - parts.WriteString(commaSpace) - parts.WriteString(signature) - parts.WriteString(signingSignature) - return parts.String() -} - -func (s *httpSigner) buildCredentialScope() string { - return strings.Join([]string{ - s.Time.Format(shortTimeFormat), - s.ServiceName, - "aws4_request", - }, "/") - -} - -func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} - -func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { - signed = make(http.Header) - - var headers []string - const hostHeader = "host" - headers = append(headers, hostHeader) - signed[hostHeader] = append(signed[hostHeader], host) - - if length > 0 { - const contentLengthHeader = "content-length" - headers = append(headers, contentLengthHeader) - signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) - } - - for k, v := range header { - if !rule.IsValid(k) { - continue // ignored header - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := signed[lowerCaseKey]; ok { - // include additional values - signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - signed[lowerCaseKey] = v - } - sort.Strings(headers) - - signedHeaders = strings.Join(headers, ";") - - var canonicalHeaders strings.Builder - n := len(headers) - const colon = ':' - for i := 0; i < n; i++ { - if headers[i] == hostHeader { - canonicalHeaders.WriteString(hostHeader) - canonicalHeaders.WriteRune(colon) - canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) - } else { - canonicalHeaders.WriteString(headers[i]) - canonicalHeaders.WriteRune(colon) - // Trim out leading, trailing, and dedup inner spaces from signed header values. - values := signed[headers[i]] - for j, v := range values { - cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) - canonicalHeaders.WriteString(cleanedValue) - if j < len(values)-1 { - canonicalHeaders.WriteRune(',') - } - } - } - canonicalHeaders.WriteRune('\n') - } - canonicalHeadersStr = canonicalHeaders.String() - - return signed, signedHeaders, canonicalHeadersStr -} - -func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { - return strings.Join([]string{ - method, - uri, - query, - canonicalHeaders, - signedHeaders, - s.PayloadHash, - }, "\n") -} - -func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { - return strings.Join([]string{ - signingAlgorithm, - s.Time.Format(timeFormat), - credentialScope, - hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), - }, "\n") -} - -func makeHash(hash hash.Hash, b []byte) []byte { - hash.Reset() - hash.Write(b) - return hash.Sum(nil) -} - -func (s *httpSigner) buildSignature(strToSign string) (string, error) { - sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256) - if err != nil { - return "", err - } - return hex.EncodeToString(sig), nil -} - -const logSignInfoMsg = `Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func logHTTPSigningInfo(ctx context.Context, options SignerOptions, r signedRequest) { - if !options.LogSigning { - return - } - signedURLMsg := "" - if r.PreSigned { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String()) - } - logger := logging.WithContext(ctx, options.Logger) - logger.Logf(logging.Debug, logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg) -} - -type signedRequest struct { - Request *http.Request - SignedHeaders http.Header - CanonicalString string - StringToSign string - PreSigned bool -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md deleted file mode 100644 index 32c9d515746a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ /dev/null @@ -1,172 +0,0 @@ -# v1.13.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. - -# v1.12.4 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. - -# v1.12.3 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 - -# v1.12.2 (2025-01-24) - -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.12.1 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. - -# v1.12.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. - -# v1.11.5 (2024-09-20) - -* No change notes available for this release. - -# v1.11.4 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. - -# v1.11.3 (2024-06-28) - -* No change notes available for this release. - -# v1.11.2 (2024-03-29) - -* No change notes available for this release. - -# v1.11.1 (2024-02-21) - -* No change notes available for this release. - -# v1.11.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# v1.10.4 (2023-12-07) - -* No change notes available for this release. - -# v1.10.3 (2023-11-30) - -* No change notes available for this release. - -# v1.10.2 (2023-11-29) - -* No change notes available for this release. - -# v1.10.1 (2023-11-15) - -* No change notes available for this release. - -# v1.10.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). - -# v1.9.15 (2023-10-06) - -* No change notes available for this release. - -# v1.9.14 (2023-08-18) - -* No change notes available for this release. - -# v1.9.13 (2023-08-07) - -* No change notes available for this release. - -# v1.9.12 (2023-07-31) - -* No change notes available for this release. - -# v1.9.11 (2022-12-02) - -* No change notes available for this release. - -# v1.9.10 (2022-10-24) - -* No change notes available for this release. - -# v1.9.9 (2022-09-14) - -* No change notes available for this release. - -# v1.9.8 (2022-09-02) - -* No change notes available for this release. - -# v1.9.7 (2022-08-31) - -* No change notes available for this release. - -# v1.9.6 (2022-08-29) - -* No change notes available for this release. - -# v1.9.5 (2022-08-11) - -* No change notes available for this release. - -# v1.9.4 (2022-08-09) - -* No change notes available for this release. - -# v1.9.3 (2022-06-29) - -* No change notes available for this release. - -# v1.9.2 (2022-06-07) - -* No change notes available for this release. - -# v1.9.1 (2022-03-24) - -* No change notes available for this release. - -# v1.9.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.8.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.7.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.6.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.5.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.4.0 (2021-10-21) - -* **Feature**: Updated to latest version - -# v1.3.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. - -# v1.2.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go deleted file mode 100644 index 3f451fc9b453..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go +++ /dev/null @@ -1,176 +0,0 @@ -package acceptencoding - -import ( - "compress/gzip" - "context" - "fmt" - "io" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const acceptEncodingHeaderKey = "Accept-Encoding" -const contentEncodingHeaderKey = "Content-Encoding" - -// AddAcceptEncodingGzipOptions provides the options for the -// AddAcceptEncodingGzip middleware setup. -type AddAcceptEncodingGzipOptions struct { - Enable bool -} - -// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP -// middleware to the operation stack. This allows checksums to be correctly -// computed without disabling GZIP support. -func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error { - if options.Enable { - if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil { - return err - } - if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil { - return err - } - return nil - } - - return stack.Finalize.Add(&DisableGzip{}, middleware.Before) -} - -// DisableGzip provides the middleware that will -// disable the underlying http client automatically enabling for gzip -// decompress content-encoding support. -type DisableGzip struct{} - -// ID returns the id for the middleware. -func (*DisableGzip) ID() string { - return "DisableAcceptEncodingGzip" -} - -// HandleFinalize implements the FinalizeMiddleware interface. -func (*DisableGzip) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - output middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, &smithy.SerializationError{ - Err: fmt.Errorf("unknown request type %T", input.Request), - } - } - - // Explicitly enable gzip support, this will prevent the http client from - // auto extracting the zipped content. - req.Header.Set(acceptEncodingHeaderKey, "identity") - - return next.HandleFinalize(ctx, input) -} - -// EnableGzip provides a middleware to enable support for -// gzip responses, with manual decompression. This prevents the underlying HTTP -// client from performing the gzip decompression automatically. -type EnableGzip struct{} - -// ID returns the id for the middleware. -func (*EnableGzip) ID() string { - return "AcceptEncodingGzip" -} - -// HandleFinalize implements the FinalizeMiddleware interface. -func (*EnableGzip) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - output middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, &smithy.SerializationError{ - Err: fmt.Errorf("unknown request type %T", input.Request), - } - } - - // Explicitly enable gzip support, this will prevent the http client from - // auto extracting the zipped content. - req.Header.Set(acceptEncodingHeaderKey, "gzip") - - return next.HandleFinalize(ctx, input) -} - -// DecompressGzip provides the middleware for decompressing a gzip -// response from the service. -type DecompressGzip struct{} - -// ID returns the id for the middleware. -func (*DecompressGzip) ID() string { - return "DecompressGzip" -} - -// HandleDeserialize implements the DeserializeMiddlware interface. -func (*DecompressGzip) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - output, metadata, err = next.HandleDeserialize(ctx, input) - if err != nil { - return output, metadata, err - } - - resp, ok := output.RawResponse.(*smithyhttp.Response) - if !ok { - return output, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("unknown response type %T", output.RawResponse), - } - } - if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" { - return output, metadata, err - } - - // Clear content length since it will no longer be valid once the response - // body is decompressed. - resp.Header.Del("Content-Length") - resp.ContentLength = -1 - - resp.Body = wrapGzipReader(resp.Body) - - return output, metadata, err -} - -type gzipReader struct { - reader io.ReadCloser - gzip *gzip.Reader -} - -func wrapGzipReader(reader io.ReadCloser) *gzipReader { - return &gzipReader{ - reader: reader, - } -} - -// Read wraps the gzip reader around the underlying io.Reader to extract the -// response bytes on the fly. -func (g *gzipReader) Read(b []byte) (n int, err error) { - if g.gzip == nil { - g.gzip, err = gzip.NewReader(g.reader) - if err != nil { - g.gzip = nil // ensure uninitialized gzip value isn't used in close. - return 0, fmt.Errorf("failed to decompress gzip response, %w", err) - } - } - - return g.gzip.Read(b) -} - -func (g *gzipReader) Close() error { - if g.gzip == nil { - return nil - } - - if err := g.gzip.Close(); err != nil { - g.reader.Close() - return fmt.Errorf("failed to decompress gzip response, %w", err) - } - - return g.reader.Close() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go deleted file mode 100644 index 7056d9bf6fa3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Package acceptencoding provides customizations associated with Accept Encoding Header. - -# Accept encoding gzip - -The Go HTTP client automatically supports accept-encoding and content-encoding -gzip by default. This default behavior is not desired by the SDK, and prevents -validating the response body's checksum. To prevent this the SDK must manually -control usage of content-encoding gzip. - -To control content-encoding, the SDK must always set the `Accept-Encoding` -header to a value. This prevents the HTTP client from using gzip automatically. -When gzip is enabled on the API client, the SDK's customization will control -decompressing the gzip data in order to not break the checksum validation. When -gzip is disabled, the API client will disable gzip, preventing the HTTP -client's default behavior. - -An `EnableAcceptEncodingGzip` option may or may not be present depending on the client using -the below middleware. The option if present can be used to enable auto decompressing -gzip by the SDK. -*/ -package acceptencoding diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go deleted file mode 100644 index f4b9f0b94886..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package acceptencoding - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md deleted file mode 100644 index 0cb8b67bfd5f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ /dev/null @@ -1,424 +0,0 @@ -# v1.8.4 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.3 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.5 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.4 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.3 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2025-05-22) - -* **Bug Fix**: Handle checksum for unseekable body with 0 content length - -# v1.7.1 (2025-04-28) - -* **Bug Fix**: Don't emit warnings about lack of checksum validation for non-200 responses. - -# v1.7.0 (2025-03-11) - -* **Feature**: Add extra check during output checksum validation so the validation skip warning would not be logged if object is not fetched from s3 - -# v1.6.2 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2025-02-10) - -* **Feature**: Support CRC64NVME flex checksums. - -# v1.5.6 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.5 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.4 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.3 (2025-01-24) - -* **Bug Fix**: Enable request checksum validation mode by default -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.5.2 (2025-01-17) - -* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. - -# v1.5.1 (2025-01-16) - -* **Bug Fix**: Fix nil dereference panic for operations that require checksums, but do not have an input setting for which algorithm to use. - -# v1.5.0 (2025-01-15) - -* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.4 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.38 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.37 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.36 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.35 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.34 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.33 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.32 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.31 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.30 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.29 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.28 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.27 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.26 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.25 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.24 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.23 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.22 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.21 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.20 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.19 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.18 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.17 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.16 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.15 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.14 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.6 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.5 (2022-04-27) - -* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors. - -# v1.1.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2022-03-08) - -* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always preforming output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606) -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2022-02-24) - -* **Release**: New module for computing checksums -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go deleted file mode 100644 index dab97fb22c8f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go +++ /dev/null @@ -1,335 +0,0 @@ -package checksum - -import ( - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "fmt" - "hash" - "hash/crc32" - "hash/crc64" - "io" - "strings" - "sync" -) - -// Algorithm represents the checksum algorithms supported -type Algorithm string - -// Enumeration values for supported checksum Algorithms. -const ( - // AlgorithmCRC32C represents CRC32C hash algorithm - AlgorithmCRC32C Algorithm = "CRC32C" - - // AlgorithmCRC32 represents CRC32 hash algorithm - AlgorithmCRC32 Algorithm = "CRC32" - - // AlgorithmSHA1 represents SHA1 hash algorithm - AlgorithmSHA1 Algorithm = "SHA1" - - // AlgorithmSHA256 represents SHA256 hash algorithm - AlgorithmSHA256 Algorithm = "SHA256" - - // AlgorithmCRC64NVME represents CRC64NVME hash algorithm - AlgorithmCRC64NVME Algorithm = "CRC64NVME" -) - -// inverted NVME polynomial as required by crc64.MakeTable -const crc64NVME = 0x9a6c_9329_ac4b_c9b5 - -var supportedAlgorithms = []Algorithm{ - AlgorithmCRC32C, - AlgorithmCRC32, - AlgorithmSHA1, - AlgorithmSHA256, - AlgorithmCRC64NVME, -} - -func (a Algorithm) String() string { return string(a) } - -// ParseAlgorithm attempts to parse the provided value into a checksum -// algorithm, matching without case. Returns the algorithm matched, or an error -// if the algorithm wasn't matched. -func ParseAlgorithm(v string) (Algorithm, error) { - for _, a := range supportedAlgorithms { - if strings.EqualFold(string(a), v) { - return a, nil - } - } - return "", fmt.Errorf("unknown checksum algorithm, %v", v) -} - -// FilterSupportedAlgorithms filters the set of algorithms, returning a slice -// of algorithms that are supported. -func FilterSupportedAlgorithms(vs []string) []Algorithm { - found := map[Algorithm]struct{}{} - - supported := make([]Algorithm, 0, len(supportedAlgorithms)) - for _, v := range vs { - for _, a := range supportedAlgorithms { - // Only consider algorithms that are supported - if !strings.EqualFold(v, string(a)) { - continue - } - // Ignore duplicate algorithms in list. - if _, ok := found[a]; ok { - continue - } - - supported = append(supported, a) - found[a] = struct{}{} - } - } - return supported -} - -// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is -// returned if the algorithm is unknown. -func NewAlgorithmHash(v Algorithm) (hash.Hash, error) { - switch v { - case AlgorithmSHA1: - return sha1.New(), nil - case AlgorithmSHA256: - return sha256.New(), nil - case AlgorithmCRC32: - return crc32.NewIEEE(), nil - case AlgorithmCRC32C: - return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil - case AlgorithmCRC64NVME: - return crc64.New(crc64.MakeTable(crc64NVME)), nil - default: - return nil, fmt.Errorf("unknown checksum algorithm, %v", v) - } -} - -// AlgorithmChecksumLength returns the length of the algorithm's checksum in -// bytes. If the algorithm is not known, an error is returned. -func AlgorithmChecksumLength(v Algorithm) (int, error) { - switch v { - case AlgorithmSHA1: - return sha1.Size, nil - case AlgorithmSHA256: - return sha256.Size, nil - case AlgorithmCRC32: - return crc32.Size, nil - case AlgorithmCRC32C: - return crc32.Size, nil - case AlgorithmCRC64NVME: - return crc64.Size, nil - default: - return 0, fmt.Errorf("unknown checksum algorithm, %v", v) - } -} - -const awsChecksumHeaderPrefix = "x-amz-checksum-" - -// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash. -func AlgorithmHTTPHeader(v Algorithm) string { - return awsChecksumHeaderPrefix + strings.ToLower(string(v)) -} - -// base64EncodeHashSum computes base64 encoded checksum of a given running -// hash. The running hash must already have content written to it. Returns the -// byte slice of checksum and an error -func base64EncodeHashSum(h hash.Hash) []byte { - sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - return sum64 -} - -// hexEncodeHashSum computes hex encoded checksum of a given running hash. The -// running hash must already have content written to it. Returns the byte slice -// of checksum and an error -func hexEncodeHashSum(h hash.Hash) []byte { - sum := h.Sum(nil) - sumHex := make([]byte, hex.EncodedLen(len(sum))) - hex.Encode(sumHex, sum) - return sumHex -} - -// computeMD5Checksum computes base64 MD5 checksum of an io.Reader's contents. -// Returns the byte slice of MD5 checksum and an error. -func computeMD5Checksum(r io.Reader) ([]byte, error) { - h := md5.New() - - // Copy errors may be assumed to be from the body. - if _, err := io.Copy(h, r); err != nil { - return nil, fmt.Errorf("failed compute MD5 hash of reader, %w", err) - } - - // Encode the MD5 checksum in base64. - return base64EncodeHashSum(h), nil -} - -// computeChecksumReader provides a reader wrapping an underlying io.Reader to -// compute the checksum of the stream's bytes. -type computeChecksumReader struct { - stream io.Reader - algorithm Algorithm - hasher hash.Hash - base64ChecksumLen int - - mux sync.RWMutex - lockedChecksum string - lockedErr error -} - -// newComputeChecksumReader returns a computeChecksumReader for the stream and -// algorithm specified. Returns error if unable to create the reader, or -// algorithm is unknown. -func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) { - hasher, err := NewAlgorithmHash(algorithm) - if err != nil { - return nil, err - } - - checksumLength, err := AlgorithmChecksumLength(algorithm) - if err != nil { - return nil, err - } - - return &computeChecksumReader{ - stream: io.TeeReader(stream, hasher), - algorithm: algorithm, - hasher: hasher, - base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength), - }, nil -} - -// Read wraps the underlying reader. When the underlying reader returns EOF, -// the checksum of the reader will be computed, and can be retrieved with -// ChecksumBase64String. -func (r *computeChecksumReader) Read(p []byte) (int, error) { - n, err := r.stream.Read(p) - if err == nil { - return n, nil - } else if err != io.EOF { - r.mux.Lock() - defer r.mux.Unlock() - - r.lockedErr = err - return n, err - } - - b := base64EncodeHashSum(r.hasher) - - r.mux.Lock() - defer r.mux.Unlock() - - r.lockedChecksum = string(b) - - return n, err -} - -func (r *computeChecksumReader) Algorithm() Algorithm { - return r.algorithm -} - -// Base64ChecksumLength returns the base64 encoded length of the checksum for -// algorithm. -func (r *computeChecksumReader) Base64ChecksumLength() int { - return r.base64ChecksumLen -} - -// Base64Checksum returns the base64 checksum for the algorithm, or error if -// the underlying reader returned a non-EOF error. -// -// Safe to be called concurrently, but will return an error until after the -// underlying reader is returns EOF. -func (r *computeChecksumReader) Base64Checksum() (string, error) { - r.mux.RLock() - defer r.mux.RUnlock() - - if r.lockedErr != nil { - return "", r.lockedErr - } - - if r.lockedChecksum == "" { - return "", fmt.Errorf( - "checksum not available yet, called before reader returns EOF", - ) - } - - return r.lockedChecksum, nil -} - -// validateChecksumReader implements io.ReadCloser interface. The wrapper -// performs checksum validation when the underlying reader has been fully read. -type validateChecksumReader struct { - originalBody io.ReadCloser - body io.Reader - hasher hash.Hash - algorithm Algorithm - expectChecksum string -} - -// newValidateChecksumReader returns a configured io.ReadCloser that performs -// checksum validation when the underlying reader has been fully read. -func newValidateChecksumReader( - body io.ReadCloser, - algorithm Algorithm, - expectChecksum string, -) (*validateChecksumReader, error) { - hasher, err := NewAlgorithmHash(algorithm) - if err != nil { - return nil, err - } - - return &validateChecksumReader{ - originalBody: body, - body: io.TeeReader(body, hasher), - hasher: hasher, - algorithm: algorithm, - expectChecksum: expectChecksum, - }, nil -} - -// Read attempts to read from the underlying stream while also updating the -// running hash. If the underlying stream returns with an EOF error, the -// checksum of the stream will be collected, and compared against the expected -// checksum. If the checksums do not match, an error will be returned. -// -// If a non-EOF error occurs when reading the underlying stream, that error -// will be returned and the checksum for the stream will be discarded. -func (c *validateChecksumReader) Read(p []byte) (n int, err error) { - n, err = c.body.Read(p) - if err == io.EOF { - if checksumErr := c.validateChecksum(); checksumErr != nil { - return n, checksumErr - } - } - - return n, err -} - -// Close closes the underlying reader, returning any error that occurred in the -// underlying reader. -func (c *validateChecksumReader) Close() (err error) { - return c.originalBody.Close() -} - -func (c *validateChecksumReader) validateChecksum() error { - // Compute base64 encoded checksum hash of the payload's read bytes. - v := base64EncodeHashSum(c.hasher) - if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) { - return validationError{ - Algorithm: c.algorithm, Expect: e, Actual: a, - } - } - - return nil -} - -type validationError struct { - Algorithm Algorithm - Expect string - Actual string -} - -func (v validationError) Error() string { - return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v", - v.Algorithm, v.Expect, v.Actual) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go deleted file mode 100644 index 3bd320c43769..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go +++ /dev/null @@ -1,389 +0,0 @@ -package checksum - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" -) - -const ( - crlf = "\r\n" - - // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html - defaultChunkLength = 1024 * 64 - - awsTrailerHeaderName = "x-amz-trailer" - decodedContentLengthHeaderName = "x-amz-decoded-content-length" - - contentEncodingHeaderName = "content-encoding" - awsChunkedContentEncodingHeaderValue = "aws-chunked" - - trailerKeyValueSeparator = ":" -) - -var ( - crlfBytes = []byte(crlf) - finalChunkBytes = []byte("0" + crlf) -) - -type awsChunkedEncodingOptions struct { - // The total size of the stream. For unsigned encoding this implies that - // there will only be a single chunk containing the underlying payload, - // unless ChunkLength is also specified. - StreamLength int64 - - // Set of trailer key:value pairs that will be appended to the end of the - // payload after the end chunk has been written. - Trailers map[string]awsChunkedTrailerValue - - // The maximum size of each chunk to be sent. Default value of -1, signals - // that optimal chunk length will be used automatically. ChunkSize must be - // at least 8KB. - // - // If ChunkLength and StreamLength are both specified, the stream will be - // broken up into ChunkLength chunks. The encoded length of the aws-chunked - // encoding can still be determined as long as all trailers, if any, have a - // fixed length. - ChunkLength int -} - -type awsChunkedTrailerValue struct { - // Function to retrieve the value of the trailer. Will only be called after - // the underlying stream returns EOF error. - Get func() (string, error) - - // If the length of the value can be pre-determined, and is constant - // specify the length. A value of -1 means the length is unknown, or - // cannot be pre-determined. - Length int -} - -// awsChunkedEncoding provides a reader that wraps the payload such that -// payload is read as a single aws-chunk payload. This reader can only be used -// if the content length of payload is known. Content-Length is used as size of -// the single payload chunk. The final chunk and trailing checksum is appended -// at the end. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition -// -// Here is the aws-chunked payload stream as read from the awsChunkedEncoding -// if original request stream is "Hello world", and checksum hash used is SHA256 -// -// \r\n -// Hello world\r\n -// 0\r\n -// x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n -// \r\n -type awsChunkedEncoding struct { - options awsChunkedEncodingOptions - - encodedStream io.Reader - trailerEncodedLength int -} - -// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured -// for unsigned aws-chunked content encoding. Any additional trailers that need -// to be appended after the end chunk must be included as via Trailer -// callbacks. -func newUnsignedAWSChunkedEncoding( - stream io.Reader, - optFns ...func(*awsChunkedEncodingOptions), -) *awsChunkedEncoding { - options := awsChunkedEncodingOptions{ - Trailers: map[string]awsChunkedTrailerValue{}, - StreamLength: -1, - ChunkLength: -1, - } - for _, fn := range optFns { - fn(&options) - } - - var chunkReader io.Reader - if options.ChunkLength != -1 || options.StreamLength == -1 { - if options.ChunkLength == -1 { - options.ChunkLength = defaultChunkLength - } - chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength) - } else { - chunkReader = newUnsignedChunkReader(stream, options.StreamLength) - } - - trailerReader := newAWSChunkedTrailerReader(options.Trailers) - - return &awsChunkedEncoding{ - options: options, - encodedStream: io.MultiReader(chunkReader, - trailerReader, - bytes.NewBuffer(crlfBytes), - ), - trailerEncodedLength: trailerReader.EncodedLength(), - } -} - -// EncodedLength returns the final length of the aws-chunked content encoded -// stream if it can be determined without reading the underlying stream or lazy -// header values, otherwise -1 is returned. -func (e *awsChunkedEncoding) EncodedLength() int64 { - var length int64 - if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 { - return -1 - } - - if e.options.StreamLength != 0 { - // If the stream length is known, and there is no chunk length specified, - // only a single chunk will be used. Otherwise the stream length needs to - // include the multiple chunk padding content. - if e.options.ChunkLength == -1 { - length += getUnsignedChunkBytesLength(e.options.StreamLength) - - } else { - // Compute chunk header and payload length - numChunks := e.options.StreamLength / int64(e.options.ChunkLength) - length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength)) - if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 { - length += getUnsignedChunkBytesLength(remainder) - } - } - } - - // End chunk - length += int64(len(finalChunkBytes)) - - // Trailers - length += int64(e.trailerEncodedLength) - - // Encoding terminator - length += int64(len(crlf)) - - return length -} - -func getUnsignedChunkBytesLength(payloadLength int64) int64 { - payloadLengthStr := strconv.FormatInt(payloadLength, 16) - return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf)) -} - -// HTTPHeaders returns the set of headers that must be included the request for -// aws-chunked to work. This includes the content-encoding: aws-chunked header. -// -// If there are multiple layered content encoding, the aws-chunked encoding -// must be appended to the previous layers the stream's encoding. The best way -// to do this is to append all header values returned to the HTTP request's set -// of headers. -func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string { - headers := map[string][]string{ - contentEncodingHeaderName: { - awsChunkedContentEncodingHeaderValue, - }, - } - - if len(e.options.Trailers) != 0 { - trailers := make([]string, 0, len(e.options.Trailers)) - for name := range e.options.Trailers { - trailers = append(trailers, strings.ToLower(name)) - } - headers[awsTrailerHeaderName] = trailers - } - - return headers -} - -func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) { - return e.encodedStream.Read(b) -} - -// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked -// content encoded trailers. The trailer values will not be retrieved until the -// reader is read from. -type awsChunkedTrailerReader struct { - reader *bytes.Buffer - trailers map[string]awsChunkedTrailerValue - trailerEncodedLength int -} - -// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader to -// lazy reading aws-chunk content encoded trailers. -func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader { - return &awsChunkedTrailerReader{ - trailers: trailers, - trailerEncodedLength: trailerEncodedLength(trailers), - } -} - -func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) { - for name, trailer := range trailers { - length += len(name) + len(trailerKeyValueSeparator) - l := trailer.Length - if l == -1 { - return -1 - } - length += l + len(crlf) - } - - return length -} - -// EncodedLength returns the length of the encoded trailers if the length could -// be determined without retrieving the header values. Returns -1 if length is -// unknown. -func (r *awsChunkedTrailerReader) EncodedLength() (length int) { - return r.trailerEncodedLength -} - -// Read populates the passed in byte slice with bytes from the encoded -// trailers. Will lazy read header values first time Read is called. -func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) { - if r.trailerEncodedLength == 0 { - return 0, io.EOF - } - - if r.reader == nil { - trailerLen := r.trailerEncodedLength - if r.trailerEncodedLength == -1 { - trailerLen = 0 - } - r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen)) - for name, trailer := range r.trailers { - r.reader.WriteString(name) - r.reader.WriteString(trailerKeyValueSeparator) - v, err := trailer.Get() - if err != nil { - return 0, fmt.Errorf("failed to get trailer value, %w", err) - } - r.reader.WriteString(v) - r.reader.WriteString(crlf) - } - } - - return r.reader.Read(p) -} - -// newUnsignedChunkReader returns an io.Reader encoding the underlying reader -// as unsigned aws-chunked chunks. The returned reader will also include the -// end chunk, but not the aws-chunked final `crlf` segment so trailers can be -// added. -// -// If the payload size is -1 for unknown length the content will be buffered in -// defaultChunkLength chunks before wrapped in aws-chunked chunk encoding. -func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader { - if payloadSize == -1 { - return newBufferedAWSChunkReader(reader, defaultChunkLength) - } - - var endChunk bytes.Buffer - if payloadSize == 0 { - endChunk.Write(finalChunkBytes) - return &endChunk - } - - endChunk.WriteString(crlf) - endChunk.Write(finalChunkBytes) - - var header bytes.Buffer - header.WriteString(strconv.FormatInt(payloadSize, 16)) - header.WriteString(crlf) - return io.MultiReader( - &header, - reader, - &endChunk, - ) -} - -// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader. -// Will include end chunk, but not the aws-chunked final `crlf` segment so -// trailers can be added. -// -// Note does not implement support for chunk extensions, e.g. chunk signing. -type bufferedAWSChunkReader struct { - reader io.Reader - chunkSize int - chunkSizeStr string - - headerBuffer *bytes.Buffer - chunkBuffer *bytes.Buffer - - multiReader io.Reader - multiReaderLen int - endChunkDone bool -} - -// newBufferedAWSChunkReader returns an bufferedAWSChunkReader for reading -// aws-chunked encoded chunks. -func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader { - return &bufferedAWSChunkReader{ - reader: reader, - chunkSize: chunkSize, - chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16), - - headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)), - chunkBuffer: bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))), - } -} - -// Read attempts to read from the underlying io.Reader writing aws-chunked -// chunk encoded bytes to p. When the underlying io.Reader has been completed -// read the end chunk will be available. Once the end chunk is read, the reader -// will return EOF. -func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) { - if r.multiReaderLen == 0 && r.endChunkDone { - return 0, io.EOF - } - if r.multiReader == nil || r.multiReaderLen == 0 { - r.multiReader, r.multiReaderLen, err = r.newMultiReader() - if err != nil { - return 0, err - } - } - - n, err = r.multiReader.Read(p) - r.multiReaderLen -= n - - if err == io.EOF && !r.endChunkDone { - // Edge case handling when the multi-reader has been completely read, - // and returned an EOF, make sure that EOF only gets returned if the - // end chunk was included in the multi-reader. Otherwise, the next call - // to read will initialize the next chunk's multi-reader. - err = nil - } - return n, err -} - -// newMultiReader returns a new io.Reader for wrapping the next chunk. Will -// return an error if the underlying reader can not be read from. Will never -// return io.EOF. -func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) { - // io.Copy eats the io.EOF returned by io.LimitReader. Any error that - // occurs here is due to an actual read error. - n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize))) - if err != nil { - return nil, 0, err - } - if n == 0 { - // Early exit writing out only the end chunk. This does not include - // aws-chunk's final `crlf` so that trailers can still be added by - // upstream reader. - r.headerBuffer.Reset() - r.headerBuffer.WriteString("0") - r.headerBuffer.WriteString(crlf) - r.endChunkDone = true - - return r.headerBuffer, r.headerBuffer.Len(), nil - } - r.chunkBuffer.WriteString(crlf) - - chunkSizeStr := r.chunkSizeStr - if int(n) != r.chunkSize { - chunkSizeStr = strconv.FormatInt(n, 16) - } - - r.headerBuffer.Reset() - r.headerBuffer.WriteString(chunkSizeStr) - r.headerBuffer.WriteString(crlf) - - return io.MultiReader( - r.headerBuffer, - r.chunkBuffer, - ), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go deleted file mode 100644 index df89189f6e08..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package checksum - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.8.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go deleted file mode 100644 index 274d649fb53c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go +++ /dev/null @@ -1,175 +0,0 @@ -package checksum - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go/middleware" -) - -// InputMiddlewareOptions provides the options for the request -// checksum middleware setup. -type InputMiddlewareOptions struct { - // GetAlgorithm is a function to get the checksum algorithm of the - // input payload from the input parameters. - // - // Given the input parameter value, the function must return the algorithm - // and true, or false if no algorithm is specified. - GetAlgorithm func(interface{}) (string, bool) - - // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. - // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, - // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequireChecksum bool - - // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is - // set to true, checksum will be calculated and this field will be ignored, otherwise - // RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Enables support for wrapping the serialized input payload with a - // content-encoding: aws-check wrapper, and including a trailer for the - // algorithm's checksum value. - // - // The checksum will not be computed, nor added as trailing checksum, if - // the Algorithm's header is already set on the request. - EnableTrailingChecksum bool - - // Enables support for computing the SHA256 checksum of input payloads - // along with the algorithm specified checksum. Prevents downstream - // middleware handlers (computePayloadSHA256) re-reading the payload. - // - // The SHA256 payload checksum will only be used for computed for requests - // that are not TLS, or do not enable trailing checksums. - // - // The SHA256 payload hash will not be computed, if the Algorithm's header - // is already set on the request. - EnableComputeSHA256PayloadHash bool - - // Enables support for setting the aws-chunked decoded content length - // header for the decoded length of the underlying stream. Will only be set - // when used with trailing checksums, and aws-chunked content-encoding. - EnableDecodedContentLengthHeader bool -} - -// AddInputMiddleware adds the middleware for performing checksum computing -// of request payloads, and checksum validation of response payloads. -// -// Deprecated: This internal-only runtime API is frozen. Do not call or modify -// it in new code. Checksum-enabled service operations now generate this -// middleware setup code inline per #2507. -func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) { - // Initial checksum configuration look up middleware - err = stack.Initialize.Add(&SetupInputContext{ - GetAlgorithm: options.GetAlgorithm, - RequireChecksum: options.RequireChecksum, - RequestChecksumCalculation: options.RequestChecksumCalculation, - }, middleware.Before) - if err != nil { - return err - } - - stack.Build.Remove("ContentChecksum") - - inputChecksum := &ComputeInputPayloadChecksum{ - EnableTrailingChecksum: options.EnableTrailingChecksum, - EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, - EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, - } - if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil { - return err - } - - // If trailing checksum is not supported no need for finalize handler to be added. - if options.EnableTrailingChecksum { - trailerMiddleware := &AddInputChecksumTrailer{ - EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, - EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, - EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, - } - if err := stack.Finalize.Insert(trailerMiddleware, "Retry", middleware.After); err != nil { - return err - } - } - - return nil -} - -// RemoveInputMiddleware Removes the compute input payload checksum middleware -// handlers from the stack. -func RemoveInputMiddleware(stack *middleware.Stack) { - id := (*SetupInputContext)(nil).ID() - stack.Initialize.Remove(id) - - id = (*ComputeInputPayloadChecksum)(nil).ID() - stack.Finalize.Remove(id) -} - -// OutputMiddlewareOptions provides options for configuring output checksum -// validation middleware. -type OutputMiddlewareOptions struct { - // GetValidationMode is a function to get the checksum validation - // mode of the output payload from the input parameters. - // - // Given the input parameter value, the function must return the validation - // mode and true, or false if no mode is specified. - GetValidationMode func(interface{}) (string, bool) - - // SetValidationMode is a function to set the checksum validation mode of input parameters - SetValidationMode func(interface{}, string) - - // ResponseChecksumValidation is the user config to opt-in/out response checksum validation - ResponseChecksumValidation aws.ResponseChecksumValidation - - // The set of checksum algorithms that should be used for response payload - // checksum validation. The algorithm(s) used will be a union of the - // output's returned algorithms and this set. - // - // Only the first algorithm in the union is currently used. - ValidationAlgorithms []string - - // If set the middleware will ignore output multipart checksums. Otherwise - // a checksum format error will be returned by the middleware. - IgnoreMultipartValidation bool - - // When set the middleware will log when output does not have checksum or - // algorithm to validate. - LogValidationSkipped bool - - // When set the middleware will log when the output contains a multipart - // checksum that was, skipped and not validated. - LogMultipartValidationSkipped bool -} - -// AddOutputMiddleware adds the middleware for validating response payload's -// checksum. -func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error { - err := stack.Initialize.Add(&setupOutputContext{ - GetValidationMode: options.GetValidationMode, - SetValidationMode: options.SetValidationMode, - ResponseChecksumValidation: options.ResponseChecksumValidation, - }, middleware.Before) - if err != nil { - return err - } - - // Resolve a supported priority order list of algorithms to validate. - algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms) - - m := &validateOutputPayloadChecksum{ - Algorithms: algorithms, - IgnoreMultipartValidation: options.IgnoreMultipartValidation, - LogMultipartValidationSkipped: options.LogMultipartValidationSkipped, - LogValidationSkipped: options.LogValidationSkipped, - } - - return stack.Deserialize.Add(m, middleware.After) -} - -// RemoveOutputMiddleware Removes the compute input payload checksum middleware -// handlers from the stack. -func RemoveOutputMiddleware(stack *middleware.Stack) { - id := (*setupOutputContext)(nil).ID() - stack.Initialize.Remove(id) - - id = (*validateOutputPayloadChecksum)(nil).ID() - stack.Deserialize.Remove(id) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go deleted file mode 100644 index 861a44293b1d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go +++ /dev/null @@ -1,90 +0,0 @@ -package checksum - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -var supportedChecksumFeatures = map[Algorithm]awsmiddleware.UserAgentFeature{ - AlgorithmCRC32: awsmiddleware.UserAgentFeatureRequestChecksumCRC32, - AlgorithmCRC32C: awsmiddleware.UserAgentFeatureRequestChecksumCRC32C, - AlgorithmSHA1: awsmiddleware.UserAgentFeatureRequestChecksumSHA1, - AlgorithmSHA256: awsmiddleware.UserAgentFeatureRequestChecksumSHA256, - AlgorithmCRC64NVME: awsmiddleware.UserAgentFeatureRequestChecksumCRC64, -} - -// RequestChecksumMetricsTracking is the middleware to track operation request's checksum usage -type RequestChecksumMetricsTracking struct { - RequestChecksumCalculation aws.RequestChecksumCalculation - UserAgent *awsmiddleware.RequestUserAgent -} - -// ID provides the middleware identifier -func (m *RequestChecksumMetricsTracking) ID() string { - return "AWSChecksum:RequestMetricsTracking" -} - -// HandleBuild checks request checksum config and checksum value sent -// and sends corresponding feature id to user agent -func (m *RequestChecksumMetricsTracking) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - switch m.RequestChecksumCalculation { - case aws.RequestChecksumCalculationWhenSupported: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenSupported) - case aws.RequestChecksumCalculationWhenRequired: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenRequired) - } - - for algo, feat := range supportedChecksumFeatures { - checksumHeader := AlgorithmHTTPHeader(algo) - if checksum := req.Header.Get(checksumHeader); checksum != "" { - m.UserAgent.AddUserAgentFeature(feat) - } - } - - return next.HandleBuild(ctx, in) -} - -// ResponseChecksumMetricsTracking is the middleware to track operation response's checksum usage -type ResponseChecksumMetricsTracking struct { - ResponseChecksumValidation aws.ResponseChecksumValidation - UserAgent *awsmiddleware.RequestUserAgent -} - -// ID provides the middleware identifier -func (m *ResponseChecksumMetricsTracking) ID() string { - return "AWSChecksum:ResponseMetricsTracking" -} - -// HandleBuild checks the response checksum config and sends corresponding feature id to user agent -func (m *ResponseChecksumMetricsTracking) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - switch m.ResponseChecksumValidation { - case aws.ResponseChecksumValidationWhenSupported: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenSupported) - case aws.ResponseChecksumValidationWhenRequired: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenRequired) - } - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go deleted file mode 100644 index 31853839c765..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go +++ /dev/null @@ -1,430 +0,0 @@ -package checksum - -import ( - "context" - "crypto/sha256" - "fmt" - "hash" - "io" - "strconv" - "strings" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const ( - streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" -) - -// computedInputChecksumsKey is the metadata key for recording the algorithm the -// checksum was computed for and the checksum value. -type computedInputChecksumsKey struct{} - -// GetComputedInputChecksums returns the map of checksum algorithm to their -// computed value stored in the middleware Metadata. Returns false if no values -// were stored in the Metadata. -func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) { - vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string) - return vs, ok -} - -// SetComputedInputChecksums stores the map of checksum algorithm to their -// computed value in the middleware Metadata. Overwrites any values that -// currently exist in the metadata. -func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) { - m.Set(computedInputChecksumsKey{}, vs) -} - -// ComputeInputPayloadChecksum middleware computes payload checksum -type ComputeInputPayloadChecksum struct { - // Enables support for wrapping the serialized input payload with a - // content-encoding: aws-check wrapper, and including a trailer for the - // algorithm's checksum value. - // - // The checksum will not be computed, nor added as trailing checksum, if - // the Algorithm's header is already set on the request. - EnableTrailingChecksum bool - - // Enables support for computing the SHA256 checksum of input payloads - // along with the algorithm specified checksum. Prevents downstream - // middleware handlers (computePayloadSHA256) re-reading the payload. - // - // The SHA256 payload hash will only be used for computed for requests - // that are not TLS, or do not enable trailing checksums. - // - // The SHA256 payload hash will not be computed, if the Algorithm's header - // is already set on the request. - EnableComputePayloadHash bool - - // Enables support for setting the aws-chunked decoded content length - // header for the decoded length of the underlying stream. Will only be set - // when used with trailing checksums, and aws-chunked content-encoding. - EnableDecodedContentLengthHeader bool - - useTrailer bool -} - -type useTrailer struct{} - -// ID provides the middleware's identifier. -func (m *ComputeInputPayloadChecksum) ID() string { - return "AWSChecksum:ComputeInputPayloadChecksum" -} - -type computeInputHeaderChecksumError struct { - Msg string - Err error -} - -func (e computeInputHeaderChecksumError) Error() string { - const intro = "compute input header checksum failed" - - if e.Err != nil { - return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err) - } - - return fmt.Sprintf("%s, %s", intro, e.Msg) -} -func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err } - -// HandleFinalize handles computing the payload's checksum, in the following cases: -// - Is HTTP, not HTTPS -// - RequireChecksum is true, and no checksums were specified via the Input -// - Trailing checksums are not supported -// -// The build handler must be inserted in the stack before ContentPayloadHash -// and after ComputeContentLength. -func (m *ComputeInputPayloadChecksum) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - var checksum string - algorithm, ok, err := getInputAlgorithm(ctx) - if err != nil { - return out, metadata, err - } - if !ok { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, computeInputHeaderChecksumError{ - Msg: fmt.Sprintf("unknown request type %T", req), - } - } - - defer func() { - if algorithm == "" || checksum == "" || err != nil { - return - } - - // Record the checksum and algorithm that was computed - SetComputedInputChecksums(&metadata, map[string]string{ - string(algorithm): checksum, - }) - }() - - // If any checksum header is already set nothing to do. - for header := range req.Header { - h := strings.ToUpper(header) - if strings.HasPrefix(h, "X-AMZ-CHECKSUM-") { - algorithm = Algorithm(strings.TrimPrefix(h, "X-AMZ-CHECKSUM-")) - checksum = req.Header.Get(header) - return next.HandleFinalize(ctx, in) - } - } - - computePayloadHash := m.EnableComputePayloadHash - if v := v4.GetPayloadHash(ctx); v != "" { - computePayloadHash = false - } - - stream := req.GetStream() - streamLength, err := getRequestStreamLength(req) - if err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to determine stream length", - Err: err, - } - } - - // If trailing checksums are supported, the request is HTTPS, and the - // stream is not nil or empty, instead switch to a trailing checksum. - // - // Nil and empty streams will always be handled as a request header, - // regardless if the operation supports trailing checksums or not. - if req.IsHTTPS() && !presignedurlcust.GetIsPresigning(ctx) { - if stream != nil && streamLength != 0 && m.EnableTrailingChecksum { - if m.EnableComputePayloadHash { - // ContentSHA256Header middleware handles the header - ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash) - } - m.useTrailer = true - ctx = middleware.WithStackValue(ctx, useTrailer{}, true) - return next.HandleFinalize(ctx, in) - } - - // If trailing checksums are not enabled but protocol is still HTTPS - // disabling computing the payload hash. Downstream middleware handler - // (ComputetPayloadHash) will set the payload hash to unsigned payload, - // if signing was used. - computePayloadHash = false - } - - // Only seekable streams are supported for non-trailing checksums, because - // the stream needs to be rewound before the handler can continue. - if stream != nil && !req.IsStreamSeekable() && streamLength != 0 { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "unseekable stream is not supported without TLS and trailing checksum", - } - } - - var sha256Checksum string - checksum, sha256Checksum, err = computeStreamChecksum( - algorithm, stream, computePayloadHash) - if err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to compute stream checksum", - Err: err, - } - } - // only attempt rewind if the stream length has been determined and is non-zero - if streamLength > 0 { - if err := req.RewindStream(); err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to rewind stream", - Err: err, - } - } - } - - checksumHeader := AlgorithmHTTPHeader(algorithm) - req.Header.Set(checksumHeader, checksum) - - if computePayloadHash { - ctx = v4.SetPayloadHash(ctx, sha256Checksum) - } - - return next.HandleFinalize(ctx, in) -} - -type computeInputTrailingChecksumError struct { - Msg string - Err error -} - -func (e computeInputTrailingChecksumError) Error() string { - const intro = "compute input trailing checksum failed" - - if e.Err != nil { - return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err) - } - - return fmt.Sprintf("%s, %s", intro, e.Msg) -} -func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err } - -// AddInputChecksumTrailer adds HTTP checksum when -// - Is HTTPS, not HTTP -// - A checksum was specified via the Input -// - Trailing checksums are supported. -type AddInputChecksumTrailer struct { - EnableTrailingChecksum bool - EnableComputePayloadHash bool - EnableDecodedContentLengthHeader bool -} - -// ID identifies this middleware. -func (*AddInputChecksumTrailer) ID() string { - return "addInputChecksumTrailer" -} - -// HandleFinalize wraps the request body to write the trailing checksum. -func (m *AddInputChecksumTrailer) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - algorithm, ok, err := getInputAlgorithm(ctx) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to get algorithm", - Err: err, - } - } else if !ok { - return next.HandleFinalize(ctx, in) - } - - if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled { - return next.HandleFinalize(ctx, in) - } - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, computeInputTrailingChecksumError{ - Msg: fmt.Sprintf("unknown request type %T", req), - } - } - - // Trailing checksums are only supported when TLS is enabled. - if !req.IsHTTPS() { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "HTTPS required", - } - } - - // If any checksum header is already set nothing to do. - for header := range req.Header { - if strings.HasPrefix(strings.ToLower(header), "x-amz-checksum-") { - return next.HandleFinalize(ctx, in) - } - } - - stream := req.GetStream() - streamLength, err := getRequestStreamLength(req) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to determine stream length", - Err: err, - } - } - - if stream == nil || streamLength == 0 { - // Nil and empty streams are handled by the Build handler. They are not - // supported by the trailing checksums finalize handler. There is no - // benefit to sending them as trailers compared to headers. - return out, metadata, computeInputTrailingChecksumError{ - Msg: "nil or empty streams are not supported", - } - } - - checksumReader, err := newComputeChecksumReader(stream, algorithm) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to created checksum reader", - Err: err, - } - } - - awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader, - func(o *awsChunkedEncodingOptions) { - o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{ - Get: checksumReader.Base64Checksum, - Length: checksumReader.Base64ChecksumLength(), - } - o.StreamLength = streamLength - }) - - for key, values := range awsChunkedReader.HTTPHeaders() { - for _, value := range values { - req.Header.Add(key, value) - } - } - - // Setting the stream on the request will create a copy. The content length - // is not updated until after the request is copied to prevent impacting - // upstream middleware. - req, err = req.SetStream(awsChunkedReader) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed updating request to trailing checksum wrapped stream", - Err: err, - } - } - req.ContentLength = awsChunkedReader.EncodedLength() - in.Request = req - - // Add decoded content length header if original stream's content length is known. - if streamLength != -1 && m.EnableDecodedContentLengthHeader { - req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10)) - } - - out, metadata, err = next.HandleFinalize(ctx, in) - if err == nil { - checksum, err := checksumReader.Base64Checksum() - if err != nil { - return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err) - } - - // Record the checksum and algorithm that was computed - SetComputedInputChecksums(&metadata, map[string]string{ - string(algorithm): checksum, - }) - } - - return out, metadata, err -} - -func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) { - ctxAlgorithm := internalcontext.GetChecksumInputAlgorithm(ctx) - if ctxAlgorithm == "" { - return "", false, nil - } - - algorithm, err := ParseAlgorithm(ctxAlgorithm) - if err != nil { - return "", false, fmt.Errorf( - "failed to parse algorithm, %w", err) - } - - return algorithm, true, nil -} - -func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) ( - checksum string, sha256Checksum string, err error, -) { - hasher, err := NewAlgorithmHash(algorithm) - if err != nil { - return "", "", fmt.Errorf( - "failed to get hasher for checksum algorithm, %w", err) - } - - var sha256Hasher hash.Hash - var batchHasher io.Writer = hasher - - // Compute payload hash for the protocol. To prevent another handler - // (computePayloadSHA256) re-reading body also compute the SHA256 for - // request signing. If configured checksum algorithm is SHA256, don't - // double wrap stream with another SHA256 hasher. - if computePayloadHash && algorithm != AlgorithmSHA256 { - sha256Hasher = sha256.New() - batchHasher = io.MultiWriter(hasher, sha256Hasher) - } - - if stream != nil { - if _, err = io.Copy(batchHasher, stream); err != nil { - return "", "", fmt.Errorf( - "failed to read stream to compute hash, %w", err) - } - } - - checksum = string(base64EncodeHashSum(hasher)) - if computePayloadHash { - if algorithm != AlgorithmSHA256 { - sha256Checksum = string(hexEncodeHashSum(sha256Hasher)) - } else { - sha256Checksum = string(hexEncodeHashSum(hasher)) - } - } - - return checksum, sha256Checksum, nil -} - -func getRequestStreamLength(req *smithyhttp.Request) (int64, error) { - if v := req.ContentLength; v >= 0 { - return v, nil - } - - if length, ok, err := req.StreamLength(); err != nil { - return 0, fmt.Errorf("failed getting request stream's length, %w", err) - } else if ok { - return length, nil - } - - return -1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go deleted file mode 100644 index 3347e88ccebe..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go +++ /dev/null @@ -1,122 +0,0 @@ -package checksum - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go/middleware" -) - -const ( - checksumValidationModeEnabled = "ENABLED" -) - -// SetupInputContext is the initial middleware that looks up the input -// used to configure checksum behavior. This middleware must be executed before -// input validation step or any other checksum middleware. -type SetupInputContext struct { - // GetAlgorithm is a function to get the checksum algorithm of the - // input payload from the input parameters. - // - // Given the input parameter value, the function must return the algorithm - // and true, or false if no algorithm is specified. - GetAlgorithm func(interface{}) (string, bool) - - // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. - // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, - // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequireChecksum bool - - // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is - // set to true, checksum will be calculated and this field will be ignored, otherwise - // RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation -} - -// ID for the middleware -func (m *SetupInputContext) ID() string { - return "AWSChecksum:SetupInputContext" -} - -// HandleInitialize initialization middleware that setups up the checksum -// context based on the input parameters provided in the stack. -func (m *SetupInputContext) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - // nil check here is for operations that require checksum but do not have input algorithm setting - if m.GetAlgorithm != nil { - if algorithm, ok := m.GetAlgorithm(in.Parameters); ok { - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm) - return next.HandleInitialize(ctx, in) - } - } - - if m.RequireChecksum || m.RequestChecksumCalculation == aws.RequestChecksumCalculationWhenSupported { - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) - } - - return next.HandleInitialize(ctx, in) -} - -type setupOutputContext struct { - // GetValidationMode is a function to get the checksum validation - // mode of the output payload from the input parameters. - // - // Given the input parameter value, the function must return the validation - // mode and true, or false if no mode is specified. - GetValidationMode func(interface{}) (string, bool) - - // SetValidationMode is a function to set the checksum validation mode of input parameters - SetValidationMode func(interface{}, string) - - // ResponseChecksumValidation states user config to opt-in/out checksum validation - ResponseChecksumValidation aws.ResponseChecksumValidation -} - -// ID for the middleware -func (m *setupOutputContext) ID() string { - return "AWSChecksum:SetupOutputContext" -} - -// HandleInitialize initialization middleware that setups up the checksum -// context based on the input parameters provided in the stack. -func (m *setupOutputContext) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - - mode, _ := m.GetValidationMode(in.Parameters) - - if m.ResponseChecksumValidation == aws.ResponseChecksumValidationWhenSupported || mode == checksumValidationModeEnabled { - m.SetValidationMode(in.Parameters, checksumValidationModeEnabled) - ctx = setContextOutputValidationMode(ctx, checksumValidationModeEnabled) - } - - return next.HandleInitialize(ctx, in) -} - -// outputValidationModeKey is the key set on context used to identify if -// output checksum validation is enabled. -type outputValidationModeKey struct{} - -// setContextOutputValidationMode sets the request checksum -// algorithm on the context. -// -// Scoped to stack values. -func setContextOutputValidationMode(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, outputValidationModeKey{}, value) -} - -// getContextOutputValidationMode returns response checksum validation state, -// if one was specified. Empty string is returned if one is not specified. -// -// Scoped to stack values. -func getContextOutputValidationMode(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go deleted file mode 100644 index 65dd4c1eff41..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go +++ /dev/null @@ -1,135 +0,0 @@ -package checksum - -import ( - "context" - "fmt" - "net/http" - "strings" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// outputValidationAlgorithmsUsedKey is the metadata key for indexing the algorithms -// that were used, by the middleware's validation. -type outputValidationAlgorithmsUsedKey struct{} - -// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used -// stored in the middleware Metadata. Returns false if no algorithms were -// stored in the Metadata. -func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) { - vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string) - return vs, ok -} - -// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the -// middleware Metadata. -func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) { - m.Set(outputValidationAlgorithmsUsedKey{}, vs) -} - -// validateOutputPayloadChecksum middleware computes payload checksum of the -// received response and validates with checksum returned by the service. -type validateOutputPayloadChecksum struct { - // Algorithms represents a priority-ordered list of valid checksum - // algorithm that should be validated when present in HTTP response - // headers. - Algorithms []Algorithm - - // IgnoreMultipartValidation indicates multipart checksums ending with "-#" - // will be ignored. - IgnoreMultipartValidation bool - - // When set the middleware will log when output does not have checksum or - // algorithm to validate. - LogValidationSkipped bool - - // When set the middleware will log when the output contains a multipart - // checksum that was, skipped and not validated. - LogMultipartValidationSkipped bool -} - -func (m *validateOutputPayloadChecksum) ID() string { - return "AWSChecksum:ValidateOutputPayloadChecksum" -} - -// HandleDeserialize is a Deserialize middleware that wraps the HTTP response -// body with an io.ReadCloser that will validate its checksum. -func (m *validateOutputPayloadChecksum) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - if mode := getContextOutputValidationMode(ctx); mode != checksumValidationModeEnabled { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("unknown transport type %T", out.RawResponse), - } - } - - // this runs BEFORE the deserializer, so we have to preemptively check for - // non-200, in which case there is no checksum to validate - if response.StatusCode != 200 { - return out, metadata, err - } - - var expectedChecksum string - var algorithmToUse Algorithm - for _, algorithm := range m.Algorithms { - value := response.Header.Get(AlgorithmHTTPHeader(algorithm)) - if len(value) == 0 { - continue - } - - expectedChecksum = value - algorithmToUse = algorithm - } - - logger := middleware.GetLogger(ctx) - - // Skip validation if no checksum algorithm or checksum is available. - if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 { - if response.Body != http.NoBody && m.LogValidationSkipped { - // TODO this probably should have more information about the - // operation output that won't be validated. - logger.Logf(logging.Warn, - "Response has no supported checksum. Not validating response payload.") - } - return out, metadata, nil - } - - // Ignore multipart validation - if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") { - if m.LogMultipartValidationSkipped { - // TODO this probably should have more information about the - // operation output that won't be validated. - logger.Logf(logging.Warn, "Skipped validation of multipart checksum.") - } - return out, metadata, nil - } - - body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum) - if err != nil { - return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err) - } - response.Body = body - - // Update the metadata to include the set of the checksum algorithms that - // will be validated. - SetOutputValidationAlgorithmsUsed(&metadata, []string{ - string(algorithmToUse), - }) - - return out, metadata, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md deleted file mode 100644 index 62da8050f3b5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ /dev/null @@ -1,461 +0,0 @@ -# v1.13.4 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.18 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.17 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.16 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.15 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.12.9 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.20 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.19 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.18 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.17 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.16 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.15 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.14 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.13 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.12 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.11 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.10 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.9 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.8 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.7 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.6 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.5 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2024-03-05) - -* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility. - -# v1.11.3 (2024-03-04) - -* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API. - -# v1.11.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.37 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.36 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.29 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.28 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.27 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.26 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.25 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go deleted file mode 100644 index 5d5286f92cc8..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go +++ /dev/null @@ -1,56 +0,0 @@ -package presignedurl - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -// WithIsPresigning adds the isPresigning sentinel value to a context to signal -// that the middleware stack is using the presign flow. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func WithIsPresigning(ctx context.Context) context.Context { - return middleware.WithStackValue(ctx, isPresigningKey{}, true) -} - -// GetIsPresigning returns if the context contains the isPresigning sentinel -// value for presigning flows. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetIsPresigning(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool) - return v -} - -type isPresigningKey struct{} - -// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that -// will update the stack's context to be flagged as being invoked for the -// purpose of presigning. -func AddAsIsPresigningMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) -} - -// AddAsIsPresigingMiddleware is an alias for backwards compatibility. -// -// Deprecated: This API was released with a typo. Use -// [AddAsIsPresigningMiddleware] instead. -func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { - return AddAsIsPresigningMiddleware(stack) -} - -type asIsPresigningMiddleware struct{} - -func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } - -func (asIsPresigningMiddleware) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - ctx = WithIsPresigning(ctx) - return next.HandleInitialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go deleted file mode 100644 index 1b85375cf806..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package presignedurl provides the customizations for API clients to fill in -// presigned URLs into input parameters. -package presignedurl diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go deleted file mode 100644 index 4a0c6ae3c9c2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package presignedurl - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go deleted file mode 100644 index 1e2f5c8122a0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go +++ /dev/null @@ -1,110 +0,0 @@ -package presignedurl - -import ( - "context" - "fmt" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - - "github.com/aws/smithy-go/middleware" -) - -// URLPresigner provides the interface to presign the input parameters in to a -// presigned URL. -type URLPresigner interface { - // PresignURL presigns a URL. - PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error) -} - -// ParameterAccessor provides an collection of accessor to for retrieving and -// setting the values needed to PresignedURL generation -type ParameterAccessor struct { - // GetPresignedURL accessor points to a function that retrieves a presigned url if present - GetPresignedURL func(interface{}) (string, bool, error) - - // GetSourceRegion accessor points to a function that retrieves source region for presigned url - GetSourceRegion func(interface{}) (string, bool, error) - - // CopyInput accessor points to a function that takes in an input, and returns a copy. - CopyInput func(interface{}) (interface{}, error) - - // SetDestinationRegion accessor points to a function that sets destination region on api input struct - SetDestinationRegion func(interface{}, string) error - - // SetPresignedURL accessor points to a function that sets presigned url on api input struct - SetPresignedURL func(interface{}, string) error -} - -// Options provides the set of options needed by the presigned URL middleware. -type Options struct { - // Accessor are the parameter accessors used by this middleware - Accessor ParameterAccessor - - // Presigner is the URLPresigner used by the middleware - Presigner URLPresigner -} - -// AddMiddleware adds the Presign URL middleware to the middleware stack. -func AddMiddleware(stack *middleware.Stack, opts Options) error { - return stack.Initialize.Add(&presign{options: opts}, middleware.Before) -} - -// RemoveMiddleware removes the Presign URL middleware from the stack. -func RemoveMiddleware(stack *middleware.Stack) error { - _, err := stack.Initialize.Remove((*presign)(nil).ID()) - return err -} - -type presign struct { - options Options -} - -func (m *presign) ID() string { return "Presign" } - -func (m *presign) HandleInitialize( - ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - // If PresignedURL is already set ignore middleware. - if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } else if ok { - return next.HandleInitialize(ctx, input) - } - - // If have source region is not set ignore middleware. - srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } else if !ok || len(srcRegion) == 0 { - return next.HandleInitialize(ctx, input) - } - - // Create a copy of the original input so the destination region value can - // be added. This ensures that value does not leak into the original - // request parameters. - paramCpy, err := m.options.Accessor.CopyInput(input.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) - } - - // Destination region is the API client's configured region. - dstRegion := awsmiddleware.GetRegion(ctx) - if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } - - presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy) - if err != nil { - return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) - } - - // Update the original input with the presigned URL value. - if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } - - return next.HandleInitialize(ctx, input) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md deleted file mode 100644 index d4dcf9d694e5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ /dev/null @@ -1,463 +0,0 @@ -# v1.19.4 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.3 (2025-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.2 (2025-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.18 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.17 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.16 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.15 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.14 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.13 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.12 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.11 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.10 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.18.9 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2024-03-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.6 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.5 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.3 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.1 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-03-21) - -* **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-09-02) - -* **Feature**: Add support for S3 Multi-Region Access Point ARNs. - -# v1.6.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-06-04) - -* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. - -# v1.3.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go deleted file mode 100644 index ec290b213510..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go +++ /dev/null @@ -1,53 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -// AccessPointARN provides representation -type AccessPointARN struct { - arn.ARN - AccessPointName string -} - -// GetARN returns the base ARN for the Access Point resource -func (a AccessPointARN) GetARN() arn.ARN { - return a.ARN -} - -// ParseAccessPointResource attempts to parse the ARN's resource as an -// AccessPoint resource. -// -// Supported Access point resource format: -// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} -// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint -func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { - if isFIPS(a.Region) { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} - } - if len(a.AccountID) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} - } - if len(resParts) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} - } - if len(resParts) > 1 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} - } - - resID := resParts[0] - if len(strings.TrimSpace(resID)) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} - } - - return AccessPointARN{ - ARN: a, - AccessPointName: resID, - }, nil -} - -func isFIPS(region string) bool { - return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go deleted file mode 100644 index 06e1a3addd43..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go +++ /dev/null @@ -1,85 +0,0 @@ -package arn - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -var supportedServiceARN = []string{ - "s3", - "s3-outposts", - "s3-object-lambda", -} - -func isSupportedServiceARN(service string) bool { - for _, name := range supportedServiceARN { - if name == service { - return true - } - } - return false -} - -// Resource provides the interfaces abstracting ARNs of specific resource -// types. -type Resource interface { - GetARN() arn.ARN - String() string -} - -// ResourceParser provides the function for parsing an ARN's resource -// component into a typed resource. -type ResourceParser func(arn.ARN) (Resource, error) - -// ParseResource parses an AWS ARN into a typed resource for the S3 API. -func ParseResource(a arn.ARN, resParser ResourceParser) (resARN Resource, err error) { - if len(a.Partition) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "partition not set"} - } - - if !isSupportedServiceARN(a.Service) { - return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} - } - - if len(a.Resource) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "resource not set"} - } - - return resParser(a) -} - -// SplitResource splits the resource components by the ARN resource delimiters. -func SplitResource(v string) []string { - var parts []string - var offset int - - for offset <= len(v) { - idx := strings.IndexAny(v[offset:], "/:") - if idx < 0 { - parts = append(parts, v[offset:]) - break - } - parts = append(parts, v[offset:idx+offset]) - offset += idx + 1 - } - - return parts -} - -// IsARN returns whether the given string is an ARN -func IsARN(s string) bool { - return arn.IsARN(s) -} - -// InvalidARNError provides the error for an invalid ARN error. -type InvalidARNError struct { - ARN arn.ARN - Reason string -} - -// Error returns a string denoting the occurred InvalidARNError -func (e InvalidARNError) Error() string { - return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go deleted file mode 100644 index 9a3258e15aa2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go +++ /dev/null @@ -1,32 +0,0 @@ -package arn - -import "fmt" - -// arnable is implemented by the relevant S3/S3Control -// operations which have members that may need ARN -// processing. -type arnable interface { - SetARNMember(string) error - GetARNMember() (*string, bool) -} - -// GetARNField would be called during middleware execution -// to retrieve a member value that is an ARN in need of -// processing. -func GetARNField(input interface{}) (*string, bool) { - v, ok := input.(arnable) - if !ok { - return nil, false - } - return v.GetARNMember() -} - -// SetARNField would called during middleware exeuction -// to set a member value that required ARN processing. -func SetARNField(input interface{}, v string) error { - params, ok := input.(arnable) - if !ok { - return fmt.Errorf("Params does not contain arn field member") - } - return params.SetARNMember(v) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go deleted file mode 100644 index e06a3028570f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go +++ /dev/null @@ -1,128 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -// OutpostARN interface that should be satisfied by outpost ARNs -type OutpostARN interface { - Resource - GetOutpostID() string -} - -// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format -// and return a specific OutpostARN type -// -// Currently supported outpost ARN formats: -// * Outpost AccessPoint ARN format: -// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} -// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint -// -// * Outpost Bucket ARN format: -// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} -// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket -// -// Other outpost ARN formats may be supported and added in the future. -func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { - if len(a.Region) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "region not set"} - } - - if isFIPS(a.Region) { - return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} - } - - if len(a.AccountID) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} - } - - // verify if outpost id is present and valid - if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - // verify possible resource type exists - if len(resParts) < 3 { - return nil, InvalidARNError{ - ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present", - } - } - - // Since we know this is a OutpostARN fetch outpostID - outpostID := strings.TrimSpace(resParts[0]) - - switch resParts[1] { - case "accesspoint": - accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) - if err != nil { - return OutpostAccessPointARN{}, err - } - return OutpostAccessPointARN{ - AccessPointARN: accesspointARN, - OutpostID: outpostID, - }, nil - - case "bucket": - bucketName, err := parseBucketResource(a, resParts[2:]) - if err != nil { - return nil, err - } - return OutpostBucketARN{ - ARN: a, - BucketName: bucketName, - OutpostID: outpostID, - }, nil - - default: - return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} - } -} - -// OutpostAccessPointARN represents outpost access point ARN. -type OutpostAccessPointARN struct { - AccessPointARN - OutpostID string -} - -// GetOutpostID returns the outpost id of outpost access point arn -func (o OutpostAccessPointARN) GetOutpostID() string { - return o.OutpostID -} - -// OutpostBucketARN represents the outpost bucket ARN. -type OutpostBucketARN struct { - arn.ARN - BucketName string - OutpostID string -} - -// GetOutpostID returns the outpost id of outpost bucket arn -func (o OutpostBucketARN) GetOutpostID() string { - return o.OutpostID -} - -// GetARN retrives the base ARN from outpost bucket ARN resource -func (o OutpostBucketARN) GetARN() arn.ARN { - return o.ARN -} - -// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the -// bucket resource id. -// -// parseBucketResource only parses the bucket resource id. -func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { - if len(resParts) == 0 { - return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} - } - if len(resParts) > 1 { - return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} - } - - bucketName = strings.TrimSpace(resParts[0]) - if len(bucketName) == 0 { - return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} - } - return bucketName, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go deleted file mode 100644 index 513154cc0e31..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go +++ /dev/null @@ -1,15 +0,0 @@ -package arn - -// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service -type S3ObjectLambdaARN interface { - Resource - - isS3ObjectLambdasARN() -} - -// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type -type S3ObjectLambdaAccessPointARN struct { - AccessPointARN -} - -func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go deleted file mode 100644 index b51532085f6f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go +++ /dev/null @@ -1,73 +0,0 @@ -package s3shared - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -// ARNLookup is the initial middleware that looks up if an arn is provided. -// This middleware is responsible for fetching ARN from a arnable field, and registering the ARN on -// middleware context. This middleware must be executed before input validation step or any other -// arn processing middleware. -type ARNLookup struct { - - // GetARNValue takes in a input interface and returns a ptr to string and a bool - GetARNValue func(interface{}) (*string, bool) -} - -// ID for the middleware -func (m *ARNLookup) ID() string { - return "S3Shared:ARNLookup" -} - -// HandleInitialize handles the behavior of this initialize step -func (m *ARNLookup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - // check if GetARNValue is supported - if m.GetARNValue == nil { - return next.HandleInitialize(ctx, in) - } - - // check is input resource is an ARN; if not go to next - v, ok := m.GetARNValue(in.Parameters) - if !ok || v == nil || !arn.IsARN(*v) { - return next.HandleInitialize(ctx, in) - } - - // if ARN process ResourceRequest and put it on ctx - av, err := arn.Parse(*v) - if err != nil { - return out, metadata, fmt.Errorf("error parsing arn: %w", err) - } - // set parsed arn on context - ctx = setARNResourceOnContext(ctx, av) - - return next.HandleInitialize(ctx, in) -} - -// arnResourceKey is the key set on context used to identify, retrive an ARN resource -// if present on the context. -type arnResourceKey struct{} - -// SetARNResourceOnContext sets the S3 ARN on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setARNResourceOnContext(ctx context.Context, value arn.ARN) context.Context { - return middleware.WithStackValue(ctx, arnResourceKey{}, value) -} - -// GetARNResourceFromContext returns an ARN from context and a bool indicating -// presence of ARN on ctx. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetARNResourceFromContext(ctx context.Context) (arn.ARN, bool) { - v, ok := middleware.GetStackValue(ctx, arnResourceKey{}).(arn.ARN) - return v, ok -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go deleted file mode 100644 index b5d31f5c574f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go +++ /dev/null @@ -1,41 +0,0 @@ -package config - -import "context" - -// UseARNRegionProvider is an interface for retrieving external configuration value for UseARNRegion -type UseARNRegionProvider interface { - GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error) -} - -// DisableMultiRegionAccessPointsProvider is an interface for retrieving external configuration value for DisableMultiRegionAccessPoints -type DisableMultiRegionAccessPointsProvider interface { - GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value bool, found bool, err error) -} - -// ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice. -// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseARNRegionProvider); ok { - value, found, err = p.GetS3UseARNRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ResolveDisableMultiRegionAccessPoints extracts the first instance of a DisableMultiRegionAccessPoints from the config slice. -// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveDisableMultiRegionAccessPoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(DisableMultiRegionAccessPointsProvider); ok { - value, found, err = p.GetS3DisableMultiRegionAccessPoints(ctx) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go deleted file mode 100644 index aa0c3714e2b0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go +++ /dev/null @@ -1,183 +0,0 @@ -package s3shared - -import ( - "fmt" - - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" -) - -// TODO: fix these error statements to be relevant to v2 sdk - -const ( - invalidARNErrorErrCode = "InvalidARNError" - configurationErrorErrCode = "ConfigurationError" -) - -// InvalidARNError denotes the error for Invalid ARN -type InvalidARNError struct { - message string - resource arn.Resource - origErr error -} - -// Error returns the InvalidARN error string -func (e InvalidARNError) Error() string { - var extra string - if e.resource != nil { - extra = "ARN: " + e.resource.String() - } - msg := invalidARNErrorErrCode + " : " + e.message - if extra != "" { - msg = msg + "\n\t" + extra - } - - return msg -} - -// OrigErr is the original error wrapped by Invalid ARN Error -func (e InvalidARNError) Unwrap() error { - return e.origErr -} - -// NewInvalidARNError denotes invalid arn error -func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "invalid ARN", - origErr: err, - resource: resource, - } -} - -// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition -func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "resource ARN not supported for the target ARN partition", - origErr: err, - resource: resource, - } -} - -// NewInvalidARNWithFIPSError ARN not supported for FIPS region -// -// Deprecated: FIPS will not appear in the ARN region component. -func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "resource ARN not supported for FIPS region", - resource: resource, - origErr: err, - } -} - -// ConfigurationError is used to denote a client configuration error -type ConfigurationError struct { - message string - resource arn.Resource - clientPartitionID string - clientRegion string - origErr error -} - -// Error returns the Configuration error string -func (e ConfigurationError) Error() string { - extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", - e.resource, e.clientPartitionID, e.clientRegion) - - msg := configurationErrorErrCode + " : " + e.message - if extra != "" { - msg = msg + "\n\t" + extra - } - return msg -} - -// OrigErr is the original error wrapped by Configuration Error -func (e ConfigurationError) Unwrap() error { - return e.origErr -} - -// NewClientPartitionMismatchError stub -func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client partition does not match provided ARN partition", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientRegionMismatchError denotes cross region access error -func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client region does not match provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewFailedToResolveEndpointError denotes endpoint resolving error -func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "endpoint resolver failed to find an endpoint for the provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access -func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for fips but cross-region resource ARN provided", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS -func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "use of ARN is not supported when client or request is configured for FIPS", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate -func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for S3 Accelerate but is not supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request -func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack -func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for S3 Dual-stack but is not supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go deleted file mode 100644 index 7ddf2e5e7e40..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package s3shared - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.19.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go deleted file mode 100644 index 85b60d2a1b92..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go +++ /dev/null @@ -1,29 +0,0 @@ -package s3shared - -import ( - "github.com/aws/smithy-go/middleware" -) - -// hostID is used to retrieve host id from response metadata -type hostID struct { -} - -// SetHostIDMetadata sets the provided host id over middleware metadata -func SetHostIDMetadata(metadata *middleware.Metadata, id string) { - metadata.Set(hostID{}, id) -} - -// GetHostIDMetadata retrieves the host id from middleware metadata -// returns host id as string along with a boolean indicating presence of -// hostId on middleware metadata. -func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) { - if !metadata.Has(hostID{}) { - return "", false - } - - v, ok := metadata.Get(hostID{}).(string) - if !ok { - return "", true - } - return v, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go deleted file mode 100644 index f02604cb62aa..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go +++ /dev/null @@ -1,28 +0,0 @@ -package s3shared - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -// clonedInputKey used to denote if request input was cloned. -type clonedInputKey struct{} - -// SetClonedInputKey sets a key on context to denote input was cloned previously. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetClonedInputKey(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, clonedInputKey{}, value) -} - -// IsClonedInput retrieves if context key for cloned input was set. -// If set, we can infer that the reuqest input was cloned previously. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func IsClonedInput(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, clonedInputKey{}).(bool) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go deleted file mode 100644 index 7251588b0039..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go +++ /dev/null @@ -1,57 +0,0 @@ -package s3shared - -import ( - "context" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const metadataRetrieverID = "S3MetadataRetriever" - -// AddMetadataRetrieverMiddleware adds request id, host id retriever middleware -func AddMetadataRetrieverMiddleware(stack *middleware.Stack) error { - // add metadata retriever middleware before operation deserializers so that it can retrieve metadata such as - // host id, request id from response header returned by operation deserializers - return stack.Deserialize.Insert(&metadataRetriever{}, "OperationDeserializer", middleware.Before) -} - -type metadataRetriever struct { -} - -// ID returns the middleware identifier -func (m *metadataRetriever) ID() string { - return metadataRetrieverID -} - -func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - - span, _ := tracing.GetSpan(ctx) - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. - return out, metadata, err - } - - // check for header for Request id - if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 { - // set reqID on metadata for successful responses. - awsmiddleware.SetRequestIDMetadata(&metadata, v) - span.SetProperty("aws.request_id", v) - } - - // look up host-id - if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 { - // set reqID on metadata for successful responses. - SetHostIDMetadata(&metadata, v) - span.SetProperty("aws.extended_request_id", v) - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go deleted file mode 100644 index bee8da3fe346..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go +++ /dev/null @@ -1,77 +0,0 @@ -package s3shared - -import ( - "fmt" - "strings" - - awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" -) - -// ResourceRequest represents an ARN resource and api request metadata -type ResourceRequest struct { - Resource arn.Resource - // RequestRegion is the region configured on the request config - RequestRegion string - - // SigningRegion is the signing region resolved for the request - SigningRegion string - - // PartitionID is the resolved partition id for the provided request region - PartitionID string - - // UseARNRegion indicates if client should use the region provided in an ARN resource - UseARNRegion bool - - // UseFIPS indicates if te client is configured for FIPS - UseFIPS bool -} - -// ARN returns the resource ARN -func (r ResourceRequest) ARN() awsarn.ARN { - return r.Resource.GetARN() -} - -// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS -// -// Deprecated: FIPS will not be present in the ARN region -func (r ResourceRequest) ResourceConfiguredForFIPS() bool { - return IsFIPS(r.ARN().Region) -} - -// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set -func (r ResourceRequest) AllowCrossRegion() bool { - return r.UseARNRegion -} - -// IsCrossPartition returns true if request is configured for region of another partition, than -// the partition that resource ARN region resolves to. IsCrossPartition will not return an error, -// if request is not configured with a specific partition id. This might happen if customer provides -// custom endpoint url, but does not associate a partition id with it. -func (r ResourceRequest) IsCrossPartition() (bool, error) { - rv := r.PartitionID - if len(rv) == 0 { - return false, nil - } - - av := r.Resource.GetARN().Partition - if len(av) == 0 { - return false, fmt.Errorf("no partition id for provided ARN") - } - - return !strings.EqualFold(rv, av), nil -} - -// IsCrossRegion returns true if request signing region is not same as arn region -func (r ResourceRequest) IsCrossRegion() bool { - v := r.SigningRegion - return !strings.EqualFold(v, r.Resource.GetARN().Region) -} - -// IsFIPS returns true if region is a fips pseudo-region -// -// Deprecated: FIPS should be specified via EndpointOptions. -func IsFIPS(region string) bool { - return strings.HasPrefix(region, "fips-") || - strings.HasSuffix(region, "-fips") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go deleted file mode 100644 index 85733624306d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go +++ /dev/null @@ -1,33 +0,0 @@ -package s3shared - -import ( - "errors" - "fmt" - - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" -) - -// ResponseError provides the HTTP centric error type wrapping the underlying error -// with the HTTP response value and the deserialized RequestID. -type ResponseError struct { - *awshttp.ResponseError - - // HostID associated with response error - HostID string -} - -// ServiceHostID returns the host id associated with Response Error -func (e *ResponseError) ServiceHostID() string { return e.HostID } - -// Error returns the formatted error -func (e *ResponseError) Error() string { - return fmt.Sprintf( - "https response error StatusCode: %d, RequestID: %s, HostID: %s, %v", - e.Response.StatusCode, e.RequestID, e.HostID, e.Err) -} - -// As populates target and returns true if the type of target is a error type that -// the ResponseError embeds, (e.g.S3 HTTP ResponseError) -func (e *ResponseError) As(target interface{}) bool { - return errors.As(e.ResponseError, target) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go deleted file mode 100644 index 54357624506d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go +++ /dev/null @@ -1,60 +0,0 @@ -package s3shared - -import ( - "context" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddResponseErrorMiddleware adds response error wrapper middleware -func AddResponseErrorMiddleware(stack *middleware.Stack) error { - // add error wrapper middleware before request id retriever middleware so that it can wrap the error response - // returned by operation deserializers - return stack.Deserialize.Insert(&errorWrapper{}, metadataRetrieverID, middleware.Before) -} - -type errorWrapper struct { -} - -// ID returns the middleware identifier -func (m *errorWrapper) ID() string { - return "ResponseErrorWrapper" -} - -func (m *errorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err == nil { - // Nothing to do when there is no error. - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. - return out, metadata, err - } - - // look for request id in metadata - reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) - // look for host id in metadata - hostID, _ := GetHostIDMetadata(metadata) - - // Wrap the returned smithy error with the request id retrieved from the metadata - err = &ResponseError{ - ResponseError: &awshttp.ResponseError{ - ResponseError: &smithyhttp.ResponseError{ - Response: resp, - Err: err, - }, - RequestID: reqID, - }, - HostID: hostID, - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go deleted file mode 100644 index 0f43ec0d4feb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go +++ /dev/null @@ -1,54 +0,0 @@ -package s3shared - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const s3100ContinueID = "S3100Continue" -const default100ContinueThresholdBytes int64 = 1024 * 1024 * 2 - -// Add100Continue add middleware, which adds {Expect: 100-continue} header for s3 client HTTP PUT request larger than 2MB -// or with unknown size streaming bodies, during operation builder step -func Add100Continue(stack *middleware.Stack, continueHeaderThresholdBytes int64) error { - return stack.Build.Add(&s3100Continue{ - continueHeaderThresholdBytes: continueHeaderThresholdBytes, - }, middleware.After) -} - -type s3100Continue struct { - continueHeaderThresholdBytes int64 -} - -// ID returns the middleware identifier -func (m *s3100Continue) ID() string { - return s3100ContinueID -} - -func (m *s3100Continue) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - sizeLimit := default100ContinueThresholdBytes - switch { - case m.continueHeaderThresholdBytes == -1: - return next.HandleBuild(ctx, in) - case m.continueHeaderThresholdBytes > 0: - sizeLimit = m.continueHeaderThresholdBytes - default: - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - if req.ContentLength == -1 || (req.ContentLength == 0 && req.Body != nil) || req.ContentLength >= sizeLimit { - req.Header.Set("Expect", "100-continue") - } - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go deleted file mode 100644 index 22773199f62a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go +++ /dev/null @@ -1,78 +0,0 @@ -package s3shared - -import ( - "context" - "fmt" - "strings" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" -) - -// EnableDualstack represents middleware struct for enabling dualstack support -// -// Deprecated: See EndpointResolverOptions' UseDualStackEndpoint support -type EnableDualstack struct { - // UseDualstack indicates if dualstack endpoint resolving is to be enabled - UseDualstack bool - - // DefaultServiceID is the service id prefix used in endpoint resolving - // by default service-id is 's3' and 's3-control' for service s3, s3control. - DefaultServiceID string -} - -// ID returns the middleware ID. -func (*EnableDualstack) ID() string { - return "EnableDualstack" -} - -// HandleSerialize handles serializer middleware behavior when middleware is executed -func (u *EnableDualstack) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - - // check for host name immutable property - if smithyhttp.GetHostnameImmutable(ctx) { - return next.HandleSerialize(ctx, in) - } - - serviceID := awsmiddle.GetServiceID(ctx) - - // s3-control may be represented as `S3 Control` as in model - if serviceID == "S3 Control" { - serviceID = "s3-control" - } - - if len(serviceID) == 0 { - // default service id - serviceID = u.DefaultServiceID - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - if u.UseDualstack { - parts := strings.Split(req.URL.Host, ".") - if len(parts) < 3 { - return out, metadata, fmt.Errorf("unable to update endpoint host for dualstack, hostname invalid, %s", req.URL.Host) - } - - for i := 0; i+1 < len(parts); i++ { - if strings.EqualFold(parts[i], serviceID) { - parts[i] = parts[i] + ".dualstack" - break - } - } - - // construct the url host - req.URL.Host = strings.Join(parts, ".") - } - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go deleted file mode 100644 index 65fd07e0006d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go +++ /dev/null @@ -1,89 +0,0 @@ -package s3shared - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "strings" -) - -// ErrorComponents represents the error response fields -// that will be deserialized from an xml error response body -type ErrorComponents struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` -} - -// GetUnwrappedErrorResponseComponents returns the error fields from an xml error response body -func GetUnwrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { - var errComponents ErrorComponents - if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) - } - return errComponents, nil -} - -// GetWrappedErrorResponseComponents returns the error fields from an xml error response body -// in which error code, and message are wrapped by a tag -func GetWrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { - var errComponents struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` - } - - if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) - } - - return ErrorComponents{ - Code: errComponents.Code, - Message: errComponents.Message, - RequestID: errComponents.RequestID, - HostID: errComponents.HostID, - }, nil -} - -// GetErrorResponseComponents retrieves error components according to passed in options -func GetErrorResponseComponents(r io.Reader, options ErrorResponseDeserializerOptions) (ErrorComponents, error) { - var errComponents ErrorComponents - var err error - - if options.IsWrappedWithErrorTag { - errComponents, err = GetWrappedErrorResponseComponents(r) - } else { - errComponents, err = GetUnwrappedErrorResponseComponents(r) - } - - if err != nil { - return ErrorComponents{}, err - } - - // If an error code or message is not retrieved, it is derived from the http status code - // eg, for S3 service, we derive err code and message, if none is found - if options.UseStatusCode && len(errComponents.Code) == 0 && - len(errComponents.Message) == 0 { - // derive code and message from status code - statusText := http.StatusText(options.StatusCode) - errComponents.Code = strings.Replace(statusText, " ", "", -1) - errComponents.Message = statusText - } - return errComponents, nil -} - -// ErrorResponseDeserializerOptions represents error response deserializer options for s3 and s3-control service -type ErrorResponseDeserializerOptions struct { - // UseStatusCode denotes if status code should be used to retrieve error code, msg - UseStatusCode bool - - // StatusCode is status code of error response - StatusCode int - - //IsWrappedWithErrorTag represents if error response's code, msg is wrapped within an - // additional tag - IsWrappedWithErrorTag bool -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md deleted file mode 100644 index 81a4c25f824e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ /dev/null @@ -1,910 +0,0 @@ -# v1.87.1 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.87.0 (2025-08-11) - -* **Feature**: Add support for configuring per-service Options via callback on global config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.86.0 (2025-08-04) - -* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.85.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.85.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.84.1 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.84.0 (2025-07-15) - -* **Feature**: Amazon S3 Metadata live inventory tables provide a queryable inventory of all the objects in your general purpose bucket so that you can determine the latest state of your data. To help minimize your storage costs, use journal table record expiration to set a retention period for your records. - -# v1.83.0 (2025-07-02) - -* **Feature**: Added support for directory bucket creation with tags and bucket ARN retrieval in CreateBucket, ListDirectoryBuckets, and HeadBucket operations - -# v1.82.0 (2025-06-25) - -* **Feature**: Adds support for additional server-side encryption mode and storage class values for accessing Amazon FSx data from Amazon S3 using S3 Access Points - -# v1.81.0 (2025-06-18) - -* **Feature**: Added support for renaming objects within the same bucket using the new RenameObject API. - -# v1.80.3 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.80.2 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.80.1 (2025-06-06) - -* No change notes available for this release. - -# v1.80.0 (2025-05-29) - -* **Feature**: Adding checksum support for S3 PutBucketOwnershipControls API. - -# v1.79.4 (2025-05-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.79.3 (2025-04-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.79.2 (2025-04-10) - -* No change notes available for this release. - -# v1.79.1 (2025-04-03) - -* No change notes available for this release. - -# v1.79.0 (2025-03-31) - -* **Feature**: Amazon S3 adds support for S3 Access Points for directory buckets in AWS Dedicated Local Zones - -# v1.78.2 (2025-03-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.78.1 (2025-03-04.2) - -* **Bug Fix**: Add assurance test for operation order. - -# v1.78.0 (2025-02-27) - -* **Feature**: Track credential providers via User-Agent Feature ids -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.77.1 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.77.0 (2025-02-14) - -* **Feature**: Added support for Content-Range header in HeadObject response. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.76.1 (2025-02-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.76.0 (2025-02-06) - -* **Feature**: Updated list of the valid AWS Region values for the LocationConstraint parameter for general purpose buckets. - -# v1.75.4 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.75.3 (2025-02-04) - -* No change notes available for this release. - -# v1.75.2 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.75.1 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.75.0 (2025-01-29) - -* **Feature**: Change the type of MpuObjectSize in CompleteMultipartUploadRequest from int to long. - -# v1.74.1 (2025-01-24) - -* **Bug Fix**: Enable request checksum validation mode by default -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.74.0 (2025-01-22) - -* **Feature**: Add a client config option to disable logging when output checksum validation is skipped due to an unsupported algorithm. - -# v1.73.2 (2025-01-17) - -* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.73.1 (2025-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.73.0 (2025-01-15) - -* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. -* **Feature**: This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.72.3 (2025-01-14) - -* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information. - -# v1.72.2 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.72.1 (2025-01-08) - -* No change notes available for this release. - -# v1.72.0 (2025-01-03) - -* **Feature**: This change is only for updating the model regexp of CopySource which is not for validation but only for documentation and user guide change. - -# v1.71.1 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.71.0 (2024-12-03.2) - -* **Feature**: Amazon S3 Metadata stores object metadata in read-only, fully managed Apache Iceberg metadata tables that you can query. You can create metadata table configurations for S3 general purpose buckets. - -# v1.70.0 (2024-12-02) - -* **Feature**: Amazon S3 introduces support for AWS Dedicated Local Zones -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.69.0 (2024-11-25) - -* **Feature**: Amazon Simple Storage Service / Features: Add support for ETag based conditional writes in PutObject and CompleteMultiPartUpload APIs to prevent unintended object modifications. - -# v1.68.0 (2024-11-21) - -* **Feature**: Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API. - -# v1.67.1 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.67.0 (2024-11-14) - -* **Feature**: This release updates the ListBuckets API Reference documentation in support of the new 10,000 general purpose bucket default quota on all AWS accounts. To increase your bucket quota from 10,000 to up to 1 million buckets, simply request a quota increase via Service Quotas. - -# v1.66.3 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.66.2 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.66.1 (2024-10-25) - -* **Bug Fix**: Update presign post URL resolution to use the exact result from EndpointResolverV2 - -# v1.66.0 (2024-10-16) - -* **Feature**: Add support for the new optional bucket-region and prefix query parameters in the ListBuckets API. For ListBuckets requests that express pagination, Amazon S3 will now return both the bucket names and associated AWS regions in the response. - -# v1.65.3 (2024-10-11) - -* **Bug Fix**: **BREAKING CHANGE**: S3 ReplicationRuleFilter and LifecycleRuleFilter shapes are being changed from union to structure types - -# v1.65.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.65.1 (2024-10-07) - -* **Bug Fix**: **CHANGE IN BEHAVIOR**: Allow serialization of headers with empty string for prefix headers. We are deploying this fix because the behavior is actively preventing users from transmitting keys with empty values to the service. If you were setting metadata keys with empty values before this change, they will now actually be sent to the service. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.65.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.64.1 (2024-10-03) - -* No change notes available for this release. - -# v1.64.0 (2024-10-02) - -* **Feature**: This release introduces a header representing the minimum object size limit for Lifecycle transitions. - -# v1.63.3 (2024-09-27) - -* No change notes available for this release. - -# v1.63.2 (2024-09-25) - -* No change notes available for this release. - -# v1.63.1 (2024-09-23) - -* No change notes available for this release. - -# v1.63.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.62.0 (2024-09-18) - -* **Feature**: Added SSE-KMS support for directory buckets. - -# v1.61.3 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.61.2 (2024-09-04) - -* No change notes available for this release. - -# v1.61.1 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.61.0 (2024-08-28) - -* **Feature**: Add presignPost for s3 PutObject - -# v1.60.1 (2024-08-22) - -* No change notes available for this release. - -# v1.60.0 (2024-08-20) - -* **Feature**: Amazon Simple Storage Service / Features : Add support for conditional writes for PutObject and CompleteMultipartUpload APIs. - -# v1.59.0 (2024-08-15) - -* **Feature**: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API. -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.58.3 (2024-08-02) - -* **Bug Fix**: Add assurance tests for auth scheme selection logic. - -# v1.58.2 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.58.1 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.58.0 (2024-07-02) - -* **Feature**: Added response overrides to Head Object requests. - -# v1.57.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.57.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.56.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.56.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.55.2 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.55.1 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.55.0 (2024-06-05) - -* **Feature**: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality. -* **Bug Fix**: Add S3-specific smithy protocol tests. - -# v1.54.4 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.54.3 (2024-05-23) - -* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK). - -# v1.54.2 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.54.1 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.54.0 (2024-05-14) - -* **Feature**: Updated a few x-id in the http uri traits - -# v1.53.2 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.53.1 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.53.0 (2024-03-18) - -* **Feature**: Fix two issues with response root node names. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.52.1 (2024-03-15) - -* **Documentation**: Documentation updates for Amazon S3. - -# v1.52.0 (2024-03-13) - -* **Feature**: This release makes the default option for S3 on Outposts request signing to use the SigV4A algorithm when using AWS Common Runtime (CRT). - -# v1.51.4 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.3 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.2 (2024-03-04) - -* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.50.3 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.50.2 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.50.1 (2024-02-19) - -* **Bug Fix**: Prevent potential panic caused by invalid comparison of credentials. - -# v1.50.0 (2024-02-16) - -* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters. - -# v1.49.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.48.1 (2024-01-24) - -* No change notes available for this release. - -# v1.48.0 (2024-01-05) - -* **Feature**: Support smithy sigv4a trait for codegen. - -# v1.47.8 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.7 (2023-12-20) - -* No change notes available for this release. - -# v1.47.6 (2023-12-18) - -* No change notes available for this release. - -# v1.47.5 (2023-12-08) - -* **Bug Fix**: Add non-vhostable buckets to request path when using legacy V1 endpoint resolver. -* **Bug Fix**: Improve uniqueness of default S3Express sesssion credentials cache keying to prevent collision in multi-credential scenarios. -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.47.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.47.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.46.0 (2023-11-28.2) - -* **Feature**: Add S3Express support. -* **Feature**: Adds support for S3 Express One Zone. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.45.1 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.45.0 (2023-11-27) - -* **Feature**: Adding new params - Key and Prefix, to S3 API operations for supporting S3 Access Grants. Note - These updates will not change any of the existing S3 API functionality. - -# v1.44.0 (2023-11-21) - -* **Feature**: Add support for automatic date based partitioning in S3 Server Access Logs. -* **Bug Fix**: Don't send MaxKeys/MaxUploads=0 when unspecified in ListObjectVersions and ListMultipartUploads paginators. - -# v1.43.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.43.0 (2023-11-17) - -* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162. -* **Feature**: Removes all default 0 values for numbers and false values for booleans - -# v1.42.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.42.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.42.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.41.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.40.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.40.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.40.0 (2023-09-26) - -* **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK. - -# v1.39.0 (2023-09-20) - -* **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException - -# v1.38.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.1 (2023-08-01) - -* No change notes available for this release. - -# v1.38.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.37.1 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.37.0 (2023-07-13) - -* **Feature**: S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.36.0 (2023-06-28) - -* **Feature**: The S3 LISTObjects, ListObjectsV2 and ListObjectVersions API now supports a new optional header x-amz-optional-object-attributes. If header contains RestoreStatus as the value, then S3 will include Glacier restore status i.e. isRestoreInProgress and RestoreExpiryDate in List response. - -# v1.35.0 (2023-06-16) - -* **Feature**: This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs. - -# v1.34.1 (2023-06-15) - -* No change notes available for this release. - -# v1.34.0 (2023-06-13) - -* **Feature**: Integrate double encryption feature to SDKs. -* **Bug Fix**: Fix HeadObject to return types.Nound when an object does not exist. Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.1 (2023-05-04) - -* **Documentation**: Documentation updates for Amazon S3 - -# v1.33.0 (2023-04-24) - -* **Feature**: added custom paginators for listMultipartUploads and ListObjectVersions -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.0 (2023-04-19) - -* **Feature**: Provides support for "Snow" Storage class. - -# v1.31.3 (2023-04-10) - -* No change notes available for this release. - -# v1.31.2 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.1 (2023-03-31) - -* **Documentation**: Documentation updates for Amazon S3 - -# v1.31.0 (2023-03-21) - -* **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.6 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.5 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.30.4 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.3 (2023-02-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.2 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.1 (2023-01-23) - -* No change notes available for this release. - -# v1.30.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.29.6 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.5 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.4 (2022-11-22) - -* No change notes available for this release. - -# v1.29.3 (2022-11-16) - -* No change notes available for this release. - -# v1.29.2 (2022-11-10) - -* No change notes available for this release. - -# v1.29.1 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.0 (2022-10-21) - -* **Feature**: S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket. -* **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2022-10-19) - -* **Feature**: Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters. - -# v1.27.11 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.10 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.9 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.8 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.7 (2022-08-30) - -* No change notes available for this release. - -# v1.27.6 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.5 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.4 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.3 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.2 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.0 (2022-07-01) - -* **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076). - -# v1.26.12 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.11 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.10 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.9 (2022-05-06) - -* No change notes available for this release. - -# v1.26.8 (2022-05-03) - -* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs. - -# v1.26.7 (2022-04-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.5 (2022-04-12) - -* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectivly. - -# v1.26.4 (2022-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.1 (2022-01-28) - -* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR. - -# v1.24.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.0 (2021-12-21) - -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. -* **Feature**: Updated to latest service endpoints - -# v1.21.0 (2021-12-02) - -* **Feature**: API client updated -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2021-11-30) - -* **Feature**: API client updated - -# v1.19.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2021-11-12) - -* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. - -# v1.18.0 (2021-11-06) - -* **Feature**: Support has been added for the SelectObjectContent API. -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Feature**: Updated service to latest API model. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to use unsigned payload signing auth when TLS is enabled. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2021-09-17) - -* **Feature**: Updated API client and endpoints to latest revision. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2021-09-10) - -* No change notes available for this release. - -# v1.15.0 (2021-09-02) - -* **Feature**: API client updated -* **Feature**: Add support for S3 Multi-Region Access Point ARNs. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2021-08-27) - -* **Feature**: Updated API model to latest revision. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2021-08-19) - -* **Feature**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2021-08-04) - -* **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346)) -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-06-04) - -* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. -* **Feature**: Updated service client to latest API model. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2021-05-25) - -* **Feature**: API client updated - -# v1.8.0 (2021-05-20) - -* **Feature**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Feature**: Updated to latest service API model. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go deleted file mode 100644 index d6637410a86e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ /dev/null @@ -1,1461 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - cryptorand "crypto/rand" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - smithyrand "github.com/aws/smithy-go/rand" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "S3" -const ServiceAPIVersion = "2006-03-01" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/s3") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/s3") -} - -// Client provides the API client to make operations call for Amazon Simple -// Storage Service. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveIdempotencyTokenProvider(&options) - - resolveEndpointResolverV2(&options) - - resolveHTTPSignerV4a(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - resolveExpressCredentials(&options) - - finalizeServiceEndpointAuthResolver(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - finalizeExpressCredentials(&options, client) - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - setSafeEventStreamClientLogMode(&options, opID) - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - finalizeOperationExpressCredentials(&options, *c) - - finalizeOperationEndpointAuthResolver(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - internalauth.NewHTTPAuthScheme("com.amazonaws.s3#sigv4express", &s3cust.ExpressSigner{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - internalauth.NewHTTPAuthScheme("aws.auth#sigv4a", &v4a.SignerAdapter{ - Signer: options.httpSignerV4a, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - RequestChecksumCalculation: cfg.RequestChecksumCalculation, - ResponseChecksumValidation: cfg.ResponseChecksumValidation, - AuthSchemePreference: cfg.AuthSchemePreference, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveInterceptors(cfg, &opts) - resolveUseARNRegion(cfg, &opts) - resolveDisableMultiRegionAccessPoints(cfg, &opts) - resolveDisableExpressAuth(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, func(o *Options) { - for _, opt := range cfg.ServiceOptions { - opt(ServiceID, o) - } - for _, opt := range optFns { - opt(o) - } - }) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) - } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func resolveInterceptors(cfg aws.Config, o *Options) { - o.Interceptors = cfg.Interceptors.Copy() -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - so.DisableURIPathEscaping = true - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - middleware.FinalizeOutput, middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func resolveIdempotencyTokenProvider(o *Options) { - if o.IdempotencyTokenProvider != nil { - return - } - o.IdempotencyTokenProvider = smithyrand.NewUUIDIdempotencyToken(cryptorand.Reader) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") - }) - if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves UseARNRegion S3 configuration -func resolveUseARNRegion(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := s3sharedconfig.ResolveUseARNRegion(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.UseARNRegion = value - } - return nil -} - -// resolves DisableMultiRegionAccessPoints S3 configuration -func resolveDisableMultiRegionAccessPoints(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := s3sharedconfig.ResolveDisableMultiRegionAccessPoints(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.DisableMultiRegionAccessPoints = value - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -type httpSignerV4a interface { - SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, - service string, regionSet []string, signingTime time.Time, - optFns ...func(*v4a.SignerOptions)) error -} - -func resolveHTTPSignerV4a(o *Options) { - if o.httpSignerV4a != nil { - return - } - o.httpSignerV4a = newDefaultV4aSigner(*o) -} - -func newDefaultV4aSigner(o Options) *v4a.Signer { - return v4a.NewSigner(func(so *v4a.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -func addRequestChecksumMetricsTracking(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - return stack.Build.Insert(&internalChecksum.RequestChecksumMetricsTracking{ - RequestChecksumCalculation: options.RequestChecksumCalculation, - UserAgent: ua, - }, "UserAgent", middleware.Before) -} - -func addResponseChecksumMetricsTracking(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - return stack.Build.Insert(&internalChecksum.ResponseChecksumMetricsTracking{ - ResponseChecksumValidation: options.ResponseChecksumValidation, - UserAgent: ua, - }, "UserAgent", middleware.Before) -} - -type setCredentialSourceMiddleware struct { - ua *awsmiddleware.RequestUserAgent - options Options -} - -func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } - -func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) - if !ok { - return next.HandleBuild(ctx, in) - } - providerSources := asProviderSource.ProviderSources() - for _, source := range providerSources { - m.ua.AddCredentialsSource(source) - } - return next.HandleBuild(ctx, in) -} - -func addCredentialSource(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - mw := setCredentialSourceMiddleware{ua: ua, options: options} - return stack.Build.Insert(&mw, "UserAgent", middleware.Before) -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -// IdempotencyTokenProvider interface for providing idempotency token -type IdempotencyTokenProvider interface { - GetIdempotencyToken() (string, error) -} - -func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { - return s3shared.AddMetadataRetrieverMiddleware(stack) -} - -func add100Continue(stack *middleware.Stack, options Options) error { - return s3shared.Add100Continue(stack, options.ContinueHeaderThresholdBytes) -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -// ComputedInputChecksumsMetadata provides information about the algorithms used -// to compute the checksum(s) of the input payload. -type ComputedInputChecksumsMetadata struct { - // ComputedChecksums is a map of algorithm name to checksum value of the computed - // input payload's checksums. - ComputedChecksums map[string]string -} - -// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of -// algorithms and input payload checksums values. -func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) { - values, ok := internalChecksum.GetComputedInputChecksums(m) - if !ok { - return ComputedInputChecksumsMetadata{}, false - } - return ComputedInputChecksumsMetadata{ - ComputedChecksums: values, - }, true - -} - -func addInputChecksumMiddleware(stack *middleware.Stack, options internalChecksum.InputMiddlewareOptions) (err error) { - err = stack.Initialize.Add(&internalChecksum.SetupInputContext{ - GetAlgorithm: options.GetAlgorithm, - RequireChecksum: options.RequireChecksum, - RequestChecksumCalculation: options.RequestChecksumCalculation, - }, middleware.Before) - if err != nil { - return err - } - - stack.Build.Remove("ContentChecksum") - - inputChecksum := &internalChecksum.ComputeInputPayloadChecksum{ - EnableTrailingChecksum: options.EnableTrailingChecksum, - EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, - EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, - } - if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil { - return err - } - - if options.EnableTrailingChecksum { - trailerMiddleware := &internalChecksum.AddInputChecksumTrailer{ - EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, - EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, - EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, - } - if err := stack.Finalize.Insert(trailerMiddleware, inputChecksum.ID(), middleware.After); err != nil { - return err - } - } - - return nil -} - -// ChecksumValidationMetadata contains metadata such as the checksum algorithm -// used for data integrity validation. -type ChecksumValidationMetadata struct { - // AlgorithmsUsed is the set of the checksum algorithms used to validate the - // response payload. The response payload must be completely read in order for the - // checksum validation to be performed. An error is returned by the operation - // output's response io.ReadCloser if the computed checksums are invalid. - AlgorithmsUsed []string -} - -// GetChecksumValidationMetadata returns the set of algorithms that will be used -// to validate the response payload with. The response payload must be completely -// read in order for the checksum validation to be performed. An error is returned -// by the operation output's response io.ReadCloser if the computed checksums are -// invalid. Returns false if no checksum algorithm used metadata was found. -func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) { - values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m) - if !ok { - return ChecksumValidationMetadata{}, false - } - return ChecksumValidationMetadata{ - AlgorithmsUsed: append(make([]string, 0, len(values)), values...), - }, true - -} - -// nopGetBucketAccessor is no-op accessor for operation that don't support bucket -// member as input -func nopGetBucketAccessor(input interface{}) (*string, bool) { - return nil, false -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return s3shared.AddResponseErrorMiddleware(stack) -} - -func disableAcceptEncodingGzip(stack *middleware.Stack) error { - return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{}) -} - -// ResponseError provides the HTTP centric error type wrapping the underlying -// error with the HTTP response value and the deserialized RequestID. -type ResponseError interface { - error - - ServiceHostID() string - ServiceRequestID() string -} - -var _ ResponseError = (*s3shared.ResponseError)(nil) - -// GetHostIDMetadata retrieves the host id from middleware metadata returns host -// id as string along with a boolean indicating presence of hostId on middleware -// metadata. -func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) { - return s3shared.GetHostIDMetadata(metadata) -} - -// HTTPPresignerV4 represents presigner interface used by presign url client -type HTTPPresignerV4 interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*v4.SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// httpPresignerV4a represents sigv4a presigner interface used by presign url -// client -type httpPresignerV4a interface { - PresignHTTP( - ctx context.Context, credentials v4a.Credentials, r *http.Request, - payloadHash string, service string, regionSet []string, signingTime time.Time, - optFns ...func(*v4a.SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignOptions represents the presign client options -type PresignOptions struct { - - // ClientOptions are list of functional options to mutate client options used by - // the presign client. - ClientOptions []func(*Options) - - // Presigner is the presigner used by the presign url client - Presigner HTTPPresignerV4 - - // Expires sets the expiration duration for the generated presign url. This should - // be the duration in seconds the presigned URL should be considered valid for. If - // not set or set to zero, presign url would default to expire after 900 seconds. - Expires time.Duration - - // presignerV4a is the presigner used by the presign url client - presignerV4a httpPresignerV4a -} - -func (o PresignOptions) copy() PresignOptions { - clientOptions := make([]func(*Options), len(o.ClientOptions)) - copy(clientOptions, o.ClientOptions) - o.ClientOptions = clientOptions - return o -} - -// WithPresignClientFromClientOptions is a helper utility to retrieve a function -// that takes PresignOption as input -func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { - return withPresignClientFromClientOptions(optFns).options -} - -type withPresignClientFromClientOptions []func(*Options) - -func (w withPresignClientFromClientOptions) options(o *PresignOptions) { - o.ClientOptions = append(o.ClientOptions, w...) -} - -// WithPresignExpires is a helper utility to append Expires value on presign -// options optional function -func WithPresignExpires(dur time.Duration) func(*PresignOptions) { - return withPresignExpires(dur).options -} - -type withPresignExpires time.Duration - -func (w withPresignExpires) options(o *PresignOptions) { - o.Expires = time.Duration(w) -} - -// PresignClient represents the presign url client -type PresignClient struct { - client *Client - options PresignOptions -} - -// NewPresignClient generates a presign client using provided API Client and -// presign options -func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { - var options PresignOptions - for _, fn := range optFns { - fn(&options) - } - if len(options.ClientOptions) != 0 { - c = New(c.options, options.ClientOptions...) - } - - if options.Presigner == nil { - options.Presigner = newDefaultV4Signer(c.options) - } - - if options.presignerV4a == nil { - options.presignerV4a = newDefaultV4aSigner(c.options) - } - - return &PresignClient{ - client: c, - options: options, - } -} - -func withNopHTTPClientAPIOption(o *Options) { - o.HTTPClient = smithyhttp.NopClient{} -} - -type presignContextPolyfillMiddleware struct { -} - -func (*presignContextPolyfillMiddleware) ID() string { - return "presignContextPolyfill" -} - -func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - schemeID := rscheme.Scheme.SchemeID() - ctx = s3cust.SetSignerVersion(ctx, schemeID) - if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { - if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr) - } - } else if schemeID == "aws.auth#sigv4a" { - if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) - } - } - - return next.HandleFinalize(ctx, in) -} - -type presignConverter PresignOptions - -func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { - stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { - stack.Finalize.Remove((*retry.Attempt)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { - stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) - } - stack.Deserialize.Clear() - stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) - stack.Build.Remove("UserAgent") - if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { - return err - } - - pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - Presigner: c.Presigner, - LogSigning: options.ClientLogMode.IsSigning(), - }) - if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { - return err - } - if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { - return err - } - - // extended s3 presigning - signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - ExpressCredentials: options.ExpressCredentials, - V4Presigner: c.Presigner, - V4aPresigner: c.presignerV4a, - LogSigning: options.ClientLogMode.IsSigning(), - }) - err = s3cust.RegisterPreSigningMiddleware(stack, signermv) - if err != nil { - return err - } - - if c.Expires < 0 { - return fmt.Errorf("presign URL duration must be 0 or greater, %v", c.Expires) - } - // add middleware to set expiration for s3 presigned url, if expiration is set to - // 0, this middleware sets a default expiration of 900 seconds - err = stack.Build.Add(&s3cust.AddExpiresOnPresignedURL{Expires: c.Expires}, middleware.After) - if err != nil { - return err - } - err = presignedurlcust.AddAsIsPresigningMiddleware(stack) - if err != nil { - return err - } - return nil -} - -func withNoDefaultChecksumAPIOption(options *Options) { - options.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ - Interceptors: opts.Interceptors.BeforeRetryLoop, - }, "Retry", middleware.Before) -} - -func addInterceptAttempt(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ - BeforeAttempt: opts.Interceptors.BeforeAttempt, - AfterAttempt: opts.Interceptors.AfterAttempt, - }, "Retry", middleware.After) -} - -func addInterceptExecution(stack *middleware.Stack, opts Options) error { - return stack.Initialize.Add(&smithyhttp.InterceptExecution{ - BeforeExecution: opts.Interceptors.BeforeExecution, - AfterExecution: opts.Interceptors.AfterExecution, - }, middleware.Before) -} - -func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ - Interceptors: opts.Interceptors.BeforeSerialization, - }, "OperationSerializer", middleware.Before) -} - -func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ - Interceptors: opts.Interceptors.AfterSerialization, - }, "OperationSerializer", middleware.After) -} - -func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ - Interceptors: opts.Interceptors.BeforeSigning, - }, "Signing", middleware.Before) -} - -func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ - Interceptors: opts.Interceptors.AfterSigning, - }, "Signing", middleware.After) -} - -func addInterceptTransmit(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ - BeforeTransmit: opts.Interceptors.BeforeTransmit, - AfterTransmit: opts.Interceptors.AfterTransmit, - }, middleware.After) -} - -func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ - Interceptors: opts.Interceptors.BeforeDeserialization, - }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse) -} - -func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ - Interceptors: opts.Interceptors.AfterDeserialization, - }, "OperationDeserializer", middleware.Before) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go deleted file mode 100644 index 35d55ea81195..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ /dev/null @@ -1,393 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// This operation aborts a multipart upload. After a multipart upload is aborted, -// no additional parts can be uploaded using that upload ID. The storage consumed -// by any previously uploaded parts will be freed. However, if any part uploads are -// currently in progress, those part uploads might or might not succeed. As a -// result, it might be necessary to abort a given multipart upload multiple times -// in order to completely free all storage consumed by all parts. -// -// To verify that all parts have been removed and prevent getting charged for the -// part storage, you should call the [ListParts]API operation and ensure that the parts list -// is empty. -// -// - Directory buckets - If multipart uploads in a directory bucket are in -// progress, you can't delete the bucket until all the in-progress multipart -// uploads are aborted or completed. To delete these in-progress multipart uploads, -// use the ListMultipartUploads operation to list the in-progress multipart -// uploads in the bucket and use the AbortMultipartUpload operation to abort all -// the in-progress multipart uploads. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to AbortMultipartUpload : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { - if params == nil { - params = &AbortMultipartUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AbortMultipartUpload", params, optFns, c.addOperationAbortMultipartUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AbortMultipartUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AbortMultipartUploadInput struct { - - // The bucket name to which the upload was taking place. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key of the object for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Upload ID that identifies the multipart upload. - // - // This member is required. - UploadId *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // If present, this header aborts an in progress multipart upload only if it was - // initiated on the provided timestamp. If the initiated timestamp of the multipart - // upload does not match the provided value, the operation returns a 412 - // Precondition Failed error. If the initiated timestamp matches or if the - // multipart upload doesn’t exist, the operation returns a 204 Success (No Content) - // response. - // - // This functionality is only supported for directory buckets. - IfMatchInitiatedTime *time.Time - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type AbortMultipartUploadOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpAbortMultipartUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AbortMultipartUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAbortMultipartUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *AbortMultipartUploadInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AbortMultipartUpload", - } -} - -// getAbortMultipartUploadBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getAbortMultipartUploadBucketMember(input interface{}) (*string, bool) { - in := input.(*AbortMultipartUploadInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addAbortMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getAbortMultipartUploadBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go deleted file mode 100644 index d428c98c9df7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ /dev/null @@ -1,679 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Completes a multipart upload by assembling previously uploaded parts. -// -// You first initiate the multipart upload and then upload all parts using the [UploadPart] -// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of -// an upload, you call this CompleteMultipartUpload operation to complete the -// upload. Upon receiving this request, Amazon S3 concatenates all the parts in -// ascending order by part number to create a new object. In the -// CompleteMultipartUpload request, you must provide the parts list and ensure that -// the parts list is complete. The CompleteMultipartUpload API operation -// concatenates the parts that you provide in the list. For each part in the list, -// you must provide the PartNumber value and the ETag value that are returned -// after that part was uploaded. -// -// The processing of a CompleteMultipartUpload request could take several minutes -// to finalize. After Amazon S3 begins processing the request, it sends an HTTP -// response header that specifies a 200 OK response. While processing is in -// progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. A request could fail after the initial 200 OK -// response has been sent. This means that a 200 OK response can contain either a -// success or an error. The error response might be embedded in the 200 OK -// response. If you call this API operation directly, make sure to design your -// application to parse the contents of the response and handle it appropriately. -// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect -// the embedded error and apply error handling per your configuration settings -// (including automatically retrying the request as appropriate). If the condition -// persists, the SDKs throw an exception (or, for the SDKs that don't use -// exceptions, they return an error). -// -// Note that if CompleteMultipartUpload fails, applications should be prepared to -// retry any failed requests (including 500 error responses). For more information, -// see [Amazon S3 Error Best Practices]. -// -// You can't use Content-Type: application/x-www-form-urlencoded for the -// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type -// header, CompleteMultipartUpload can still return a 200 OK response. -// -// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// If you provide an [additional checksum value]in your MultipartUpload requests and the object is encrypted -// -// with Key Management Service, you must have permission to use the kms:Decrypt -// action for the CompleteMultipartUpload request to succeed. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Special errors -// -// - Error Code: EntityTooSmall -// -// - Description: Your proposed upload is smaller than the minimum allowed -// object size. Each part must be at least 5 MB in size, except the last part. -// -// - HTTP Status Code: 400 Bad Request -// -// - Error Code: InvalidPart -// -// - Description: One or more of the specified parts could not be found. The -// part might not have been uploaded, or the specified ETag might not have matched -// the uploaded part's ETag. -// -// - HTTP Status Code: 400 Bad Request -// -// - Error Code: InvalidPartOrder -// -// - Description: The list of parts was not in ascending order. The parts list -// must be specified in order by part number. -// -// - HTTP Status Code: 400 Bad Request -// -// - Error Code: NoSuchUpload -// -// - Description: The specified multipart upload does not exist. The upload ID -// might be invalid, or the multipart upload might have been aborted or completed. -// -// - HTTP Status Code: 404 Not Found -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to CompleteMultipartUpload : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [additional checksum value]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { - if params == nil { - params = &CompleteMultipartUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CompleteMultipartUpload", params, optFns, c.addOperationCompleteMultipartUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CompleteMultipartUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CompleteMultipartUploadInput struct { - - // Name of the bucket to which the multipart upload was initiated. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // ID for the initiated multipart upload. - // - // This member is required. - UploadId *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum - // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // This header specifies the checksum type of the object, which determines how - // part-level checksums are combined to create an object-level checksum for - // multipart objects. You can use this header as a data integrity check to verify - // that the checksum type that is received is the same checksum that was specified. - // If the checksum type doesn’t match the checksum type that was specified for the - // object during the CreateMultipartUpload request, it’ll result in a BadDigest - // error. For more information, see Checking object integrity in the Amazon S3 User - // Guide. - ChecksumType types.ChecksumType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Uploads the object only if the ETag (entity tag) value provided during the - // WRITE operation matches the ETag of the object in S3. If the ETag values do not - // match, the operation returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should fetch the - // object's ETag, re-initiate the multipart upload with CreateMultipartUpload , and - // re-upload each part. - // - // Expects the ETag value as a string. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. - // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Uploads the object only if the object key name does not already exist in the - // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should re-initiate the - // multipart upload with CreateMultipartUpload and re-upload each part. - // - // Expects the '*' (asterisk) character. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. - // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // The expected total object size of the multipart upload request. If there’s a - // mismatch between the specified object size value and the actual object size - // value, it results in an HTTP 400 InvalidRequest error. - MpuObjectSize *int64 - - // The container for the multipart upload request information. - MultipartUpload *types.CompletedMultipartUpload - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is required only when the object was created using a checksum - // algorithm or if your bucket policy requires the use of SSE-C. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key - SSECustomerAlgorithm *string - - // The server-side encryption (SSE) customer managed key. This parameter is needed - // only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // The MD5 server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type CompleteMultipartUploadOutput struct { - - // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. - // - // Access points are not supported by directory buckets. - Bucket *string - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only - // present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum - // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. You can use this header - // as a data integrity check to verify that the checksum type that is received is - // the same checksum type that was specified during the CreateMultipartUpload - // request. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Entity tag that identifies the newly created object's data. Objects with - // different object data will have different entity tags. The entity tag is an - // opaque string. The entity tag may or may not be an MD5 digest of the object - // data. If the entity tag is not an MD5 digest of the object data, it will contain - // one or more nonhexadecimal characters and/or will consist of less than 32 or - // more than 32 hexadecimal digits. For more information about how the entity tag - // is calculated, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ETag *string - - // If the object expiration is configured, this will contain the expiration date ( - // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. - // - // This functionality is not supported for directory buckets. - Expiration *string - - // The object key of the newly created object. - Key *string - - // The URI that identifies the newly created object. - Location *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when storing this object in Amazon S3. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // Version ID of the newly created object, in case the bucket has versioning - // turned on. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCompleteMultipartUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CompleteMultipartUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteMultipartUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CompleteMultipartUploadInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CompleteMultipartUpload", - } -} - -// getCompleteMultipartUploadBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getCompleteMultipartUploadBucketMember(input interface{}) (*string, bool) { - in := input.(*CompleteMultipartUploadInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCompleteMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCompleteMultipartUploadBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go deleted file mode 100644 index f593667eaf1f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ /dev/null @@ -1,1114 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue -// support for creating new Email Grantee Access Control Lists (ACL). Email Grantee -// ACLs created prior to this date will continue to work and remain accessible -// through the Amazon Web Services Management Console, Command Line Interface -// (CLI), SDKs, and REST API. However, you will no longer be able to create new -// Email Grantee ACLs. -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// Creates a copy of an object that is already stored in Amazon S3. -// -// You can store individual objects of up to 5 TB in Amazon S3. You create a copy -// of your object up to 5 GB in size in a single atomic action using this API. -// However, to copy an object greater than 5 GB, you must use the multipart upload -// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API]. -// -// You can copy individual objects between general purpose buckets, between -// directory buckets, and between general purpose buckets and directory buckets. -// -// - Amazon S3 supports copy operations using Multi-Region Access Points only as -// a destination when using the Multi-Region Access Point ARN. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// - VPC endpoints don't support cross-Region requests (including copies). If -// you're using VPC endpoints, your source and destination buckets should be in the -// same Amazon Web Services Region as your VPC endpoint. -// -// Both the Region that you want to copy the object from and the Region that you -// want to copy the object to must be enabled for your account. For more -// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon -// Web Services Account Management Guide. -// -// Amazon S3 transfer acceleration does not support cross-Region copies. If you -// request a cross-Region copy using a transfer acceleration endpoint, you get a -// 400 Bad Request error. For more information, see [Transfer Acceleration]. -// -// Authentication and authorization All CopyObject requests must be authenticated -// and signed by using IAM credentials (access key ID and secret access key for the -// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source -// , must be signed. For more information, see [REST Authentication]. -// -// Directory buckets - You must use the IAM credentials to authenticate and -// authorize your access to the CopyObject API operation, instead of using the -// temporary security credentials through the CreateSession API operation. -// -// Amazon Web Services CLI or SDKs handles authentication and authorization on -// your behalf. -// -// Permissions You must have read access to the source object and write access to -// the destination bucket. -// -// - General purpose bucket permissions - You must have permissions in an IAM -// policy based on the source and destination bucket types in a CopyObject -// operation. -// -// - If the source object is in a general purpose bucket, you must have -// s3:GetObject permission to read the source object that is being copied. -// -// - If the destination bucket is a general purpose bucket, you must have -// s3:PutObject permission to write the object copy to the destination bucket. -// -// - Directory bucket permissions - You must have permissions in a bucket policy -// or an IAM identity-based policy based on the source and destination bucket types -// in a CopyObject operation. -// -// - If the source object that you want to copy is in a directory bucket, you -// must have the s3express:CreateSession permission in the Action element of a -// policy to read the object. By default, the session is in the ReadWrite mode. -// If you want to restrict the access, you can explicitly set the -// s3express:SessionMode condition key to ReadOnly on the copy source bucket. -// -// - If the copy destination is a directory bucket, you must have the -// s3express:CreateSession permission in the Action element of a policy to write -// the object to the destination. The s3express:SessionMode condition key can't -// be set to ReadOnly on the copy destination bucket. -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. -// -// Response and special errors When the request is an HTTP 1.1 request, the -// response is chunk encoded. When the request is not an HTTP 1.1 request, the -// response would not contain the Content-Length . You always need to read the -// entire response body to check if the copy succeeds. -// -// - If the copy is successful, you receive a response with information about -// the copied object. -// -// - A copy request might return an error when Amazon S3 receives the copy -// request or while Amazon S3 is copying the files. A 200 OK response can contain -// either a success or an error. -// -// - If the error occurs before the copy action starts, you receive a standard -// Amazon S3 error. -// -// - If the error occurs during the copy operation, the error response is -// embedded in the 200 OK response. For example, in a cross-region copy, you may -// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3] -// . The 200 OK status code means the copy was accepted, but it doesn't mean the -// copy is complete. Another example is when you disconnect from Amazon S3 before -// the copy is complete, Amazon S3 might cancel the copy and you may receive a -// 200 OK response. You must stay connected to Amazon S3 until the entire -// response is successfully received and processed. -// -// If you call this API operation directly, make sure to design your application -// -// to parse the content of the response and handle it appropriately. If you use -// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the -// embedded error and apply error handling per your configuration settings -// (including automatically retrying the request as appropriate). If the condition -// persists, the SDKs throw an exception (or, for the SDKs that don't use -// exceptions, they return an error). -// -// Charge The copy request charge is based on the storage class and Region that -// you specify for the destination object. The request can also result in a data -// retrieval charge for the source if the source storage class bills for data -// retrieval. If the copy source is in a different region, the data transfer is -// billed to the copy source account. For pricing information, see [Amazon S3 pricing]. -// -// HTTP Host header syntax -// -// - Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// - Amazon S3 on Outposts - When you use this action with S3 on Outposts -// through the REST API, you must direct requests to the S3 on Outposts hostname. -// The S3 on Outposts hostname takes the form -// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The -// hostname isn't required when you use the Amazon Web Services CLI or SDKs. -// -// The following operations are related to CopyObject : -// -// [PutObject] -// -// [GetObject] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror -// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html -// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone -// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/ -func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { - if params == nil { - params = &CopyObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, c.addOperationCopyObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CopyObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CopyObjectInput struct { - - // The name of the destination bucket. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Copying objects across different Amazon Web Services Regions isn't supported - // when the source or destination bucket is in Amazon Web Services Local Zones. The - // source and destination buckets must have the same parent Amazon Web Services - // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code - // InvalidRequest . - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must use the - // Outpost bucket access point ARN or the access point alias for the destination - // bucket. You can only copy objects within the same Outpost bucket. It's not - // supported to copy objects across different Amazon Web Services Outposts, between - // buckets on the same Outposts, or between Outposts buckets and any other bucket - // types. For more information about S3 on Outposts, see [What is S3 on Outposts?]in the S3 on Outposts - // guide. When you use this action with S3 on Outposts through the REST API, you - // must direct requests to the S3 on Outposts hostname, in the format - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The - // hostname isn't required when you use the Amazon Web Services CLI or SDKs. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Specifies the source object for the copy operation. The source object can be up - // to 5 GB. If the source object is an object that was uploaded by using a - // multipart upload, the object copy will be a single part object after the source - // object is copied to the destination bucket. - // - // You specify the value of the copy source in one of two formats, depending on - // whether you want to access the source object through an [access point]: - // - // - For objects not accessed through an access point, specify the name of the - // source bucket and the key of the source object, separated by a slash (/). For - // example, to copy the object reports/january.pdf from the general purpose - // bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value - // must be URL-encoded. To copy the object reports/january.pdf from the directory - // bucket awsexamplebucket--use1-az5--x-s3 , use - // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be - // URL-encoded. - // - // - For objects accessed through access points, specify the Amazon Resource - // Name (ARN) of the object as accessed through the access point, in the format - // arn:aws:s3:::accesspoint//object/ . For example, to copy the object - // reports/january.pdf through access point my-access-point owned by account - // 123456789012 in Region us-west-2 , use the URL encoding of - // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf - // . The value must be URL encoded. - // - // - Amazon S3 supports copy operations using Access points only when the source - // and destination buckets are in the same Amazon Web Services Region. - // - // - Access points are not supported by directory buckets. - // - // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the - // ARN of the object as accessed in the format - // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object - // reports/january.pdf through outpost my-outpost owned by account 123456789012 - // in Region us-west-2 , use the URL encoding of - // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf - // . The value must be URL-encoded. - // - // If your source bucket versioning is enabled, the x-amz-copy-source header by - // default identifies the current version of an object to copy. If the current - // version is a delete marker, Amazon S3 behaves as if the object was deleted. To - // copy a different version, use the versionId query parameter. Specifically, - // append ?versionId= to the value (for example, - // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. - // - // If you enable versioning on the destination bucket, Amazon S3 generates a - // unique version ID for the copied object. This version ID is different from the - // version ID of the source object. Amazon S3 returns the version ID of the copied - // object in the x-amz-version-id response header in the response. - // - // If you do not enable versioning or suspend it on the destination bucket, the - // version ID that Amazon S3 generates in the x-amz-version-id response header is - // always null. - // - // Directory buckets - S3 Versioning isn't enabled and supported for directory - // buckets. - // - // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html - // - // This member is required. - CopySource *string - - // The key of the destination object. - // - // This member is required. - Key *string - - // The canned access control list (ACL) to apply to the object. - // - // When you copy an object, the ACL metadata is not preserved and is set to private - // by default. Only the owner has full access control. To override the default ACL - // setting, specify a new ACL when you generate a copy request. For more - // information, see [Using ACLs]. - // - // If the destination bucket that you're copying objects to uses the bucket owner - // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect - // permissions. Buckets that use this setting only accept PUT requests that don't - // specify an ACL or PUT requests that specify bucket owner full control ACLs, - // such as the bucket-owner-full-control canned ACL or an equivalent form of this - // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 - // User Guide. - // - // - If your destination bucket uses the bucket owner enforced setting for - // Object Ownership, all objects written to the bucket by any account will be owned - // by the bucket owner. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html - // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - ACL types.ObjectCannedACL - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. - // - // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t - // affect bucket-level settings for S3 Bucket Key. - // - // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. - // - // Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS - // encrypted objects from general purpose buckets to directory buckets, from - // directory buckets to general purpose buckets, or between directory buckets, - // through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request - // is made for a KMS-encrypted object. - // - // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - BucketKeyEnabled *bool - - // Specifies the caching behavior along the request/reply chain. - CacheControl *string - - // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // When you copy an object, if the source object has a checksum, that checksum - // value will be copied to the new object by default. If the CopyObject request - // does not include this x-amz-checksum-algorithm header, the checksum algorithm - // will be copied from the source object to the destination object (if it's present - // on the source object). You can optionally specify a different checksum algorithm - // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported - // values will respond with the HTTP status code 400 Bad Request . - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // Specifies presentational information for the object. Indicates whether an - // object should be displayed in a web browser or downloaded as a file. It allows - // specifying the desired filename for the downloaded file. - ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - // - // For directory buckets, only the aws-chunked value is supported in this header - // field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // A standard MIME type that describes the format of the object data. - ContentType *string - - // Copies the object if its entity tag (ETag) matches the specified tag. - // - // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request and evaluate as follows, Amazon S3 returns - // 200 OK and copies the data: - // - // - x-amz-copy-source-if-match condition evaluates to true - // - // - x-amz-copy-source-if-unmodified-since condition evaluates to false - CopySourceIfMatch *string - - // Copies the object if it has been modified since the specified time. - // - // If both the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request and - // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response - // code: - // - // - x-amz-copy-source-if-none-match condition evaluates to false - // - // - x-amz-copy-source-if-modified-since condition evaluates to true - CopySourceIfModifiedSince *time.Time - - // Copies the object if its entity tag (ETag) is different than the specified ETag. - // - // If both the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request and - // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response - // code: - // - // - x-amz-copy-source-if-none-match condition evaluates to false - // - // - x-amz-copy-source-if-modified-since condition evaluates to true - CopySourceIfNoneMatch *string - - // Copies the object if it hasn't been modified since the specified time. - // - // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request and evaluate as follows, Amazon S3 returns - // 200 OK and copies the data: - // - // - x-amz-copy-source-if-match condition evaluates to true - // - // - x-amz-copy-source-if-unmodified-since condition evaluates to false - CopySourceIfUnmodifiedSince *time.Time - - // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). - // - // If the source object for the copy is stored in Amazon S3 using SSE-C, you must - // provide the necessary encryption information in your request so that Amazon S3 - // can decrypt the object for copying. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be the same - // one that was used when the source object was created. - // - // If the source object for the copy is stored in Amazon S3 using SSE-C, you must - // provide the necessary encryption information in your request so that Amazon S3 - // can decrypt the object for copying. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // If the source object for the copy is stored in Amazon S3 using SSE-C, you must - // provide the necessary encryption information in your request so that Amazon S3 - // can decrypt the object for copying. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKeyMD5 *string - - // The account ID of the expected destination bucket owner. If the account ID that - // you provide does not match the actual owner of the destination bucket, the - // request fails with the HTTP status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The account ID of the expected source bucket owner. If the account ID that you - // provide does not match the actual owner of the source bucket, the request fails - // with the HTTP status code 403 Forbidden (access denied). - ExpectedSourceBucketOwner *string - - // The date and time at which the object is no longer cacheable. - Expires *time.Time - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantFullControl *string - - // Allows grantee to read the object data and its metadata. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantRead *string - - // Allows grantee to read the object ACL. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantReadACP *string - - // Allows grantee to write the ACL for the applicable object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantWriteACP *string - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Specifies whether the metadata is copied from the source object or replaced - // with metadata that's provided in the request. When copying an object, you can - // preserve all metadata (the default) or specify new metadata. If this header - // isn’t specified, COPY is the default behavior. - // - // General purpose bucket - For general purpose buckets, when you grant - // permissions, you can use the s3:x-amz-metadata-directive condition key to - // enforce certain metadata behavior when objects are uploaded. For more - // information, see [Amazon S3 condition key examples]in the Amazon S3 User Guide. - // - // x-amz-website-redirect-location is unique to each object and is not copied when - // using the x-amz-metadata-directive header. To copy the value, you must specify - // x-amz-website-redirect-location in the request header. - // - // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html - MetadataDirective types.MetadataDirective - - // Specifies whether you want to apply a legal hold to the object copy. - // - // This functionality is not supported for directory buckets. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode that you want to apply to the object copy. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // The date and time when you want the Object Lock of the object copy to expire. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256 ). - // - // When you perform a CopyObject operation, if you want to use a different type of - // encryption setting for the target object, you can specify appropriate - // encryption-related headers to encrypt the target object with an Amazon S3 - // managed key, a KMS key, or a customer-provided key. If the encryption setting in - // your request is different from the default encryption configuration of the - // destination bucket, the encryption setting in your request takes precedence. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded. Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKeyMD5 *string - - // Specifies the Amazon Web Services KMS Encryption Context as an additional - // encryption context to use for the destination object encryption. The value of - // this header is a base64-encoded UTF-8 string holding JSON with the encryption - // context key-value pairs. - // - // General purpose buckets - This value must be explicitly added to specify - // encryption context for CopyObject requests if you want an additional encryption - // context for your destination object. The additional encryption context of the - // source object won't be copied to the destination object. For more information, - // see [Encryption context]in the Amazon S3 User Guide. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - // - // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context - SSEKMSEncryptionContext *string - - // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object - // encryption. All GET and PUT requests for an object protected by KMS will fail if - // they're not made via SSL or using SigV4. For information about configuring any - // of the officially supported Amazon Web Services SDKs and Amazon Web Services - // CLI, see [Specifying the Signature Version in Request Authentication]in the Amazon S3 User Guide. - // - // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify - // the x-amz-server-side-encryption header to aws:kms . Then, the - // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's - // default KMS customer managed key ID. If you want to explicitly set the - // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's - // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS - // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 - // ) isn't supported. - // - // Incorrect key specification results in an HTTP 400 Bad Request error. - // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm used when storing this object in Amazon - // S3. Unrecognized or unsupported values won’t write a destination object and will - // receive a 400 Bad Request response. - // - // Amazon S3 automatically encrypts all new objects that are copied to an S3 - // bucket. When copying an object, if you don't specify encryption information in - // your copy request, the encryption setting of the target object is set to the - // default encryption configuration of the destination bucket. By default, all - // buckets have a base level of encryption configuration that uses server-side - // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a - // different default encryption configuration, Amazon S3 uses the corresponding - // encryption key to encrypt the target object copy. - // - // With server-side encryption, Amazon S3 encrypts your data as it writes your - // data to disks in its data centers and decrypts the data when you access it. For - // more information about server-side encryption, see [Using Server-Side Encryption]in the Amazon S3 User Guide. - // - // General purpose buckets - // - // - For general purpose buckets, there are the following supported options for - // server-side encryption: server-side encryption with Key Management Service (KMS) - // keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS - // keys (DSSE-KMS), and server-side encryption with customer-provided encryption - // keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided - // key to encrypt the target object copy. - // - // - When you perform a CopyObject operation, if you want to use a different type - // of encryption setting for the target object, you can specify appropriate - // encryption-related headers to encrypt the target object with an Amazon S3 - // managed key, a KMS key, or a customer-provided key. If the encryption setting in - // your request is different from the default encryption configuration of the - // destination bucket, the encryption setting in your request takes precedence. - // - // Directory buckets - // - // - For directory buckets, there are only two supported options for server-side - // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( - // AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We - // recommend that the bucket's default encryption uses the desired encryption - // configuration and you don't override the bucket default encryption in your - // CreateSession requests or PUT object requests. Then, new objects are - // automatically encrypted with the desired encryption settings. For more - // information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the - // encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. - // - // - To encrypt new object copies to a directory bucket with SSE-KMS, we - // recommend you specify SSE-KMS as the directory bucket's default encryption - // configuration with a KMS key (specifically, a [customer managed key]). The [Amazon Web Services managed key]( aws/s3 ) isn't - // supported. Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket - // for the lifetime of the bucket. After you specify a customer managed key for - // SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS - // configuration. Then, when you perform a CopyObject operation and want to - // specify server-side encryption settings for new object copies with SSE-KMS in - // the encryption-related request headers, you must ensure the encryption key is - // the same customer managed key that you specified for the directory bucket's - // default encryption configuration. - // - // - S3 access points for Amazon FSx - When accessing data stored in Amazon FSx - // file systems using S3 access points, the only valid server side encryption - // option is aws:fsx . All Amazon FSx file systems have encryption configured by - // default and are encrypted at rest. Data is automatically encrypted before being - // written to the file system, and automatically decrypted as it is read. These - // processes are handled transparently by Amazon FSx. - // - // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html - // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - ServerSideEncryption types.ServerSideEncryption - - // If the x-amz-storage-class header is not used, the copied object will be stored - // in the STANDARD Storage Class by default. The STANDARD storage class provides - // high durability and high availability. Depending on performance needs, you can - // specify a different Storage Class. - // - // - Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 - // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 - // One Zone-Infrequent Access storage class) in Dedicated Local Zones. Unsupported - // storage class values won't write a destination object and will respond with the - // HTTP status code 400 Bad Request . - // - // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. - // - // You can use the CopyObject action to change the storage class of an object that - // is already stored in Amazon S3 by using the x-amz-storage-class header. For - // more information, see [Storage Classes]in the Amazon S3 User Guide. - // - // Before using an object as a source object for the copy operation, you must - // restore a copy of it if it meets any of the following conditions: - // - // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . - // - // - The storage class of the source object is INTELLIGENT_TIERING and it's [S3 Intelligent-Tiering access tier]is - // Archive Access or Deep Archive Access . - // - // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html - // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html - // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition - StorageClass types.StorageClass - - // The tag-set for the object copy in the destination bucket. This value must be - // used in conjunction with the x-amz-tagging-directive if you choose REPLACE for - // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive - // , you don't need to set the x-amz-tagging header, because the tag-set will be - // copied from the source object directly. The tag-set must be encoded as URL Query - // parameters. - // - // The default value is the empty value. - // - // Directory buckets - For directory buckets in a CopyObject operation, only the - // empty tag-set is supported. Any requests that attempt to write non-empty tags - // into directory buckets will receive a 501 Not Implemented status code. When the - // destination bucket is a directory bucket, you will receive a 501 Not Implemented - // response in any of the following situations: - // - // - When you attempt to COPY the tag-set from an S3 source object that has - // non-empty tags. - // - // - When you attempt to REPLACE the tag-set of a source object and set a - // non-empty value to x-amz-tagging . - // - // - When you don't set the x-amz-tagging-directive header and the source object - // has non-empty tags. This is because the default value of - // x-amz-tagging-directive is COPY . - // - // Because only the empty tag-set is supported for directory buckets in a - // CopyObject operation, the following situations are allowed: - // - // - When you attempt to COPY the tag-set from a directory bucket source object - // that has no tags to a general purpose bucket. It copies an empty tag-set to the - // destination object. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and set the x-amz-tagging value of the directory bucket destination object to - // empty. - // - // - When you attempt to REPLACE the tag-set of a general purpose bucket source - // object that has non-empty tags and set the x-amz-tagging value of the - // directory bucket destination object to empty. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and don't set the x-amz-tagging value of the directory bucket destination - // object. This is because the default value of x-amz-tagging is the empty value. - Tagging *string - - // Specifies whether the object tag-set is copied from the source object or - // replaced with the tag-set that's provided in the request. - // - // The default value is COPY . - // - // Directory buckets - For directory buckets in a CopyObject operation, only the - // empty tag-set is supported. Any requests that attempt to write non-empty tags - // into directory buckets will receive a 501 Not Implemented status code. When the - // destination bucket is a directory bucket, you will receive a 501 Not Implemented - // response in any of the following situations: - // - // - When you attempt to COPY the tag-set from an S3 source object that has - // non-empty tags. - // - // - When you attempt to REPLACE the tag-set of a source object and set a - // non-empty value to x-amz-tagging . - // - // - When you don't set the x-amz-tagging-directive header and the source object - // has non-empty tags. This is because the default value of - // x-amz-tagging-directive is COPY . - // - // Because only the empty tag-set is supported for directory buckets in a - // CopyObject operation, the following situations are allowed: - // - // - When you attempt to COPY the tag-set from a directory bucket source object - // that has no tags to a general purpose bucket. It copies an empty tag-set to the - // destination object. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and set the x-amz-tagging value of the directory bucket destination object to - // empty. - // - // - When you attempt to REPLACE the tag-set of a general purpose bucket source - // object that has non-empty tags and set the x-amz-tagging value of the - // directory bucket destination object to empty. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and don't set the x-amz-tagging value of the directory bucket destination - // object. This is because the default value of x-amz-tagging is the empty value. - TaggingDirective types.TaggingDirective - - // If the destination bucket is configured as a website, redirects requests for - // this object copy to another object in the same bucket or to an external URL. - // Amazon S3 stores the value of this header in the object metadata. This value is - // unique to each object and is not copied when using the x-amz-metadata-directive - // header. Instead, you may opt to provide this header in combination with the - // x-amz-metadata-directive header. - // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - noSmithyDocumentSerde -} - -func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.CopySource = in.CopySource - p.Key = in.Key - p.DisableS3ExpressSessionAuth = ptr.Bool(true) -} - -type CopyObjectOutput struct { - - // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Container for all response elements. - CopyObjectResult *types.CopyObjectResult - - // Version ID of the source object that was copied. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceVersionId *string - - // If the object expiration is configured, the response includes this header. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - Expiration *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. - SSEKMSEncryptionContext *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // Version ID of the newly created copy. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CopyObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCopyObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCopyObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CopyObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CopyObject", - } -} - -// getCopyObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getCopyObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*CopyObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCopyObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go deleted file mode 100644 index e5365fbf2bd0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue -// support for creating new Email Grantee Access Control Lists (ACL). Email Grantee -// ACLs created prior to this date will continue to work and remain accessible -// through the Amazon Web Services Management Console, Command Line Interface -// (CLI), SDKs, and REST API. However, you will no longer be able to create new -// Email Grantee ACLs. -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts -// bucket, see [CreateBucket]CreateBucket . -// -// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have -// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous -// requests are never allowed to create buckets. By creating the bucket, you become -// the bucket owner. -// -// There are two types of buckets: general purpose buckets and directory buckets. -// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets]in the Amazon S3 User Guide. -// -// - General purpose buckets - If you send your CreateBucket request to the -// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So -// the signature calculations in Signature Version 4 must use us-east-1 as the -// Region, even if the location constraint in the request specifies another Region -// where the bucket is to be created. If you create a bucket in a Region other than -// US East (N. Virginia), your application must be able to handle 307 redirect. For -// more information, see [Virtual hosting of buckets]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Regional endpoint. These endpoints support path-style -// requests in the format -// https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - In addition to the s3:CreateBucket -// permission, the following permissions are required in a policy when your -// CreateBucket request includes specific headers: -// -// - Access control lists (ACLs) - In your CreateBucket request, if you specify -// an access control list (ACL) and set it to public-read , public-read-write , -// authenticated-read , or if you explicitly specify any other custom ACLs, both -// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your -// CreateBucket request, if you set the ACL to private , or if you don't specify -// any ACLs, only the s3:CreateBucket permission is required. -// -// - Object Lock - In your CreateBucket request, if you set -// x-amz-bucket-object-lock-enabled to true, the -// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are -// required. -// -// - S3 Object Ownership - If your CreateBucket request includes the -// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. -// -// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly -// -// set S3 Object Ownership for the bucket to a different value than the default, -// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public -// access, you must first create the bucket (without the bucket ACL) and then -// explicitly disable Block Public Access on the bucket before using PutBucketAcl -// to set the ACL. If you try to create a bucket with a public ACL, the request -// will fail. -// -// For the majority of modern use cases in S3, we recommend that you keep all -// -// Block Public Access settings enabled and keep ACLs disabled. If you would like -// to share data with users outside of your account, you can use bucket policies as -// needed. For more information, see [Controlling ownership of objects and disabling ACLs for your bucket]and [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. -// -// - S3 Block Public Access - If your specific use case requires granting public -// access to your S3 resources, you can disable Block Public Access. Specifically, -// you can create a new bucket with Block Public Access enabled, then separately -// call the [DeletePublicAccessBlock]DeletePublicAccessBlock API. To use this operation, you must have the -// s3:PutBucketPublicAccessBlock permission. For more information about S3 Block -// Public Access, see [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - You must have the s3express:CreateBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the -// Amazon S3 User Guide. -// -// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public -// -// Access are not supported for directory buckets. For directory buckets, all Block -// Public Access settings are enabled at the bucket level and S3 Object Ownership -// is set to Bucket owner enforced (ACLs disabled). These settings can't be -// modified. -// -// For more information about permissions for creating and working with directory -// -// buckets, see [Directory buckets]in the Amazon S3 User Guide. For more information about -// supported S3 features for directory buckets, see [Features of S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to CreateBucket : -// -// [PutObject] -// -// [DeleteBucket] -// -// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html -// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html -func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { - if params == nil { - params = &CreateBucketInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateBucket", params, optFns, c.addOperationCreateBucketMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateBucketOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateBucketInput struct { - - // The name of the bucket to create. - // - // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules] - // in the Amazon S3 User Guide. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html - // - // This member is required. - Bucket *string - - // The canned ACL to apply to the bucket. - // - // This functionality is not supported for directory buckets. - ACL types.BucketCannedACL - - // The configuration information for the bucket. - CreateBucketConfiguration *types.CreateBucketConfiguration - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - // - // This functionality is not supported for directory buckets. - GrantFullControl *string - - // Allows grantee to list the objects in the bucket. - // - // This functionality is not supported for directory buckets. - GrantRead *string - - // Allows grantee to read the bucket ACL. - // - // This functionality is not supported for directory buckets. - GrantReadACP *string - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions and - // overwrites of those objects. - // - // This functionality is not supported for directory buckets. - GrantWrite *string - - // Allows grantee to write the ACL for the applicable bucket. - // - // This functionality is not supported for directory buckets. - GrantWriteACP *string - - // Specifies whether you want S3 Object Lock to be enabled for the new bucket. - // - // This functionality is not supported for directory buckets. - ObjectLockEnabledForBucket *bool - - // The container element for object ownership for a bucket's ownership controls. - // - // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the - // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. - // - // ObjectWriter - The uploading account will own the object if the object is - // uploaded with the bucket-owner-full-control canned ACL. - // - // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer - // affect permissions. The bucket owner automatically owns and has full control - // over every object in the bucket. The bucket only accepts PUT requests that don't - // specify an ACL or specify bucket owner full control ACLs (such as the predefined - // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). - // - // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are - // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where - // you must control access for each object individually. For more information about - // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. Directory buckets - // use the bucket owner enforced setting for S3 Object Ownership. - // - // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - ObjectOwnership types.ObjectOwnership - - noSmithyDocumentSerde -} - -func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) - p.DisableAccessPoints = ptr.Bool(true) -} - -type CreateBucketOutput struct { - - // The Amazon Resource Name (ARN) of the S3 bucket. ARNs uniquely identify Amazon - // Web Services resources across all of Amazon Web Services. - // - // This parameter is only supported for S3 directory buckets. For more - // information, see [Using tags with directory buckets]. - // - // [Using tags with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-tagging.html - BucketArn *string - - // A forward slash followed by the name of the bucket. - Location *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucket{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucket"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCreateBucketValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucket(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateBucketUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateBucket", - } -} - -// getCreateBucketBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getCreateBucketBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateBucketInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateBucketBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: false, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataConfiguration.go deleted file mode 100644 index aef91a974a39..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataConfiguration.go +++ /dev/null @@ -1,345 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates an S3 Metadata V2 metadata configuration for a general purpose bucket. -// For more information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// Permissions To use this operation, you must have the following permissions. For -// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// If you want to encrypt your metadata tables with server-side encryption with -// Key Management Service (KMS) keys (SSE-KMS), you need additional permissions in -// your KMS key policy. For more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// If you also want to integrate your table bucket with Amazon Web Services -// analytics services so that you can query your metadata table, you need -// additional permissions. For more information, see [Integrating Amazon S3 Tables with Amazon Web Services analytics services]in the Amazon S3 User Guide. -// -// To query your metadata tables, you need additional permissions. For more -// information, see [Permissions for querying metadata tables]in the Amazon S3 User Guide. -// -// - s3:CreateBucketMetadataTableConfiguration -// -// The IAM policy action name is the same for the V1 and V2 API operations. -// -// - s3tables:CreateTableBucket -// -// - s3tables:CreateNamespace -// -// - s3tables:GetTable -// -// - s3tables:CreateTable -// -// - s3tables:PutTablePolicy -// -// - s3tables:PutTableEncryption -// -// - kms:DescribeKey -// -// The following operations are related to CreateBucketMetadataConfiguration : -// -// [DeleteBucketMetadataConfiguration] -// -// [GetBucketMetadataConfiguration] -// -// [UpdateBucketMetadataInventoryTableConfiguration] -// -// [UpdateBucketMetadataJournalTableConfiguration] -// -// [GetBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataConfiguration.html -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [UpdateBucketMetadataJournalTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataJournalTableConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// [Permissions for querying metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-bucket-query-permissions.html -// [UpdateBucketMetadataInventoryTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataInventoryTableConfiguration.html -// [DeleteBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataConfiguration.html -// [Integrating Amazon S3 Tables with Amazon Web Services analytics services]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-aws.html -func (c *Client) CreateBucketMetadataConfiguration(ctx context.Context, params *CreateBucketMetadataConfigurationInput, optFns ...func(*Options)) (*CreateBucketMetadataConfigurationOutput, error) { - if params == nil { - params = &CreateBucketMetadataConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateBucketMetadataConfiguration", params, optFns, c.addOperationCreateBucketMetadataConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateBucketMetadataConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateBucketMetadataConfigurationInput struct { - - // The general purpose bucket that you want to create the metadata configuration - // for. - // - // This member is required. - Bucket *string - - // The contents of your metadata configuration. - // - // This member is required. - MetadataConfiguration *types.MetadataConfiguration - - // The checksum algorithm to use with your metadata configuration. - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Content-MD5 header for the metadata configuration. - ContentMD5 *string - - // The expected owner of the general purpose bucket that corresponds to your - // metadata configuration. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *CreateBucketMetadataConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type CreateBucketMetadataConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateBucketMetadataConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucketMetadataConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucketMetadataConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucketMetadataConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCreateBucketMetadataConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucketMetadataConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateBucketMetadataConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addCreateBucketMetadataConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateBucketMetadataConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCreateBucketMetadataConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateBucketMetadataConfiguration", - } -} - -// getCreateBucketMetadataConfigurationRequestAlgorithmMember gets the request -// checksum algorithm value provided as input. -func getCreateBucketMetadataConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*CreateBucketMetadataConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addCreateBucketMetadataConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getCreateBucketMetadataConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getCreateBucketMetadataConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getCreateBucketMetadataConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateBucketMetadataConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateBucketMetadataConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateBucketMetadataConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go deleted file mode 100644 index f920f7aaf363..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// We recommend that you create your S3 Metadata configurations by using the V2 [CreateBucketMetadataConfiguration] -// -// API operation. We no longer recommend using the V1 -// CreateBucketMetadataTableConfiguration API operation. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// Creates a V1 S3 Metadata configuration for a general purpose bucket. For more -// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// Permissions To use this operation, you must have the following permissions. For -// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// If you want to encrypt your metadata tables with server-side encryption with -// Key Management Service (KMS) keys (SSE-KMS), you need additional permissions. -// For more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// If you also want to integrate your table bucket with Amazon Web Services -// analytics services so that you can query your metadata table, you need -// additional permissions. For more information, see [Integrating Amazon S3 Tables with Amazon Web Services analytics services]in the Amazon S3 User Guide. -// -// - s3:CreateBucketMetadataTableConfiguration -// -// - s3tables:CreateNamespace -// -// - s3tables:GetTable -// -// - s3tables:CreateTable -// -// - s3tables:PutTablePolicy -// -// The following operations are related to CreateBucketMetadataTableConfiguration : -// -// [DeleteBucketMetadataTableConfiguration] -// -// [GetBucketMetadataTableConfiguration] -// -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html -// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// [Integrating Amazon S3 Tables with Amazon Web Services analytics services]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-aws.html -func (c *Client) CreateBucketMetadataTableConfiguration(ctx context.Context, params *CreateBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*CreateBucketMetadataTableConfigurationOutput, error) { - if params == nil { - params = &CreateBucketMetadataTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateBucketMetadataTableConfiguration", params, optFns, c.addOperationCreateBucketMetadataTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateBucketMetadataTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateBucketMetadataTableConfigurationInput struct { - - // The general purpose bucket that you want to create the metadata table - // configuration for. - // - // This member is required. - Bucket *string - - // The contents of your metadata table configuration. - // - // This member is required. - MetadataTableConfiguration *types.MetadataTableConfiguration - - // The checksum algorithm to use with your metadata table configuration. - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Content-MD5 header for the metadata table configuration. - ContentMD5 *string - - // The expected owner of the general purpose bucket that corresponds to your - // metadata table configuration. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *CreateBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type CreateBucketMetadataTableConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucketMetadataTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateBucketMetadataTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateBucketMetadataTableConfiguration", - } -} - -// getCreateBucketMetadataTableConfigurationRequestAlgorithmMember gets the -// request checksum algorithm value provided as input. -func getCreateBucketMetadataTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*CreateBucketMetadataTableConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getCreateBucketMetadataTableConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getCreateBucketMetadataTableConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getCreateBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateBucketMetadataTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateBucketMetadataTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go deleted file mode 100644 index 1195c232d094..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ /dev/null @@ -1,1056 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue -// support for creating new Email Grantee Access Control Lists (ACL). Email Grantee -// ACLs created prior to this date will continue to work and remain accessible -// through the Amazon Web Services Management Console, Command Line Interface -// (CLI), SDKs, and REST API. However, you will no longer be able to create new -// Email Grantee ACLs. -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This action initiates a multipart upload and returns an upload ID. This upload -// ID is used to associate all of the parts in the specific multipart upload. You -// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]). -// You also include this upload ID in the final request to either complete or abort -// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview] -// in the Amazon S3 User Guide. -// -// After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or abort -// the multipart upload. Amazon S3 frees up the space used to store the parts and -// stops charging you for storing them only after you either complete or abort a -// multipart upload. -// -// If you have configured a lifecycle rule to abort incomplete multipart uploads, -// the created multipart upload must be completed within the number of days -// specified in the bucket lifecycle configuration. Otherwise, the incomplete -// multipart upload becomes eligible for an abort action and Amazon S3 aborts the -// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. -// -// - Directory buckets - S3 Lifecycle is not supported by directory buckets. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Request signing For request signing, multipart upload is just a series of -// regular requests. You initiate a multipart upload, send one or more requests to -// upload parts, and then complete the multipart upload process. You sign each -// request individually. There is nothing special about signing multipart upload -// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - To perform a multipart upload with -// encryption using an Key Management Service (KMS) KMS key, the requester must -// have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. -// The requester must also have permissions for the kms:GenerateDataKey action -// for the CreateMultipartUpload API. Then, the requester needs permissions for -// the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These -// permissions are required because Amazon S3 must decrypt and read data from the -// encrypted file parts before it completes the multipart upload. For more -// information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Encryption -// -// - General purpose buckets - Server-side encryption is for data encryption at -// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. Amazon S3 automatically encrypts all new -// objects that are uploaded to an S3 bucket. When doing a multipart upload, if you -// don't specify encryption information in your request, the encryption setting of -// the uploaded parts is set to the default encryption configuration of the -// destination bucket. By default, all buckets have a base level of encryption -// configuration that uses server-side encryption with Amazon S3 managed keys -// (SSE-S3). If the destination bucket has a default encryption configuration that -// uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), -// or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding -// KMS key, or a customer-provided key to encrypt the uploaded parts. When you -// perform a CreateMultipartUpload operation, if you want to use a different type -// of encryption setting for the uploaded parts, you can request that Amazon S3 -// encrypts the object with a different encryption key (such as an Amazon S3 -// managed key, a KMS key, or a customer-provided key). When the encryption setting -// in your request is different from the default encryption configuration of the -// destination bucket, the encryption setting in your request takes precedence. If -// you choose to provide your own encryption key, the request headers you provide -// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload -// request. -// -// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( -// aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) -// – If you want Amazon Web Services to manage the keys used to encrypt data, -// specify the following headers in the request. -// -// - x-amz-server-side-encryption -// -// - x-amz-server-side-encryption-aws-kms-key-id -// -// - x-amz-server-side-encryption-context -// -// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide -// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web -// Services managed key ( aws/s3 key) in KMS to protect the data. -// -// - To perform a multipart upload with encryption by using an Amazon Web -// Services KMS key, the requester must have permission to the kms:Decrypt and -// kms:GenerateDataKey* actions on the key. These permissions are required -// because Amazon S3 must decrypt and read data from the encrypted file parts -// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in -// the Amazon S3 User Guide. -// -// - If your Identity and Access Management (IAM) user or role is in the same -// Amazon Web Services account as the KMS key, then you must have these permissions -// on the key policy. If your IAM user or role is in a different account from the -// key, then you must have the permissions on both the key policy and your IAM user -// or role. -// -// - All GET and PUT requests for an object protected by KMS fail if you don't -// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), -// or Signature Version 4. For information about configuring any of the officially -// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the -// Amazon S3 User Guide. -// -// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys] -// -// in the Amazon S3 User Guide. -// -// - Use customer-provided encryption keys (SSE-C) – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about server-side encryption with customer-provided -// -// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: server-side encryption with Amazon S3 -// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses -// the desired encryption configuration and you don't override the bucket default -// encryption in your CreateSession requests or PUT object requests. Then, new -// objects are automatically encrypted with the desired encryption settings. For -// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about -// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. -// -// In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the -// -// encryption request headers must match the encryption settings that are specified -// in the CreateSession request. You can't override the values of the encryption -// settings ( x-amz-server-side-encryption , -// x-amz-server-side-encryption-aws-kms-key-id , -// x-amz-server-side-encryption-context , and -// x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the -// CreateSession request. You don't need to explicitly specify these encryption -// settings values in Zonal endpoint API calls, and Amazon S3 will use the -// encryption settings values from the CreateSession request to protect new -// objects in the directory bucket. -// -// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the -// -// session token refreshes automatically to avoid service interruptions when a -// session expires. The CLI or the Amazon Web Services SDKs use the bucket's -// default encryption configuration for the CreateSession request. It's not -// supported to override the encryption settings values in the CreateSession -// request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption -// request headers must match the default encryption configuration of the directory -// bucket. -// -// For directory buckets, when you perform a CreateMultipartUpload operation and an -// -// UploadPartCopy operation, the request headers you provide in the -// CreateMultipartUpload request must match the default encryption configuration -// of the destination bucket. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to CreateMultipartUpload : -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version -// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html -// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { - if params == nil { - params = &CreateMultipartUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateMultipartUpload", params, optFns, c.addOperationCreateMultipartUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateMultipartUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateMultipartUploadInput struct { - - // The name of the bucket where the multipart upload is initiated and where the - // object is uploaded. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the multipart upload is to be initiated. - // - // This member is required. - Key *string - - // The canned ACL to apply to the object. Amazon S3 supports a set of predefined - // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and - // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can grant access permissions to individual Amazon - // Web Services accounts or to predefined groups defined by Amazon S3. These - // permissions are then added to the access control list (ACL) on the new object. - // For more information, see [Using ACLs]. One way to grant the permissions using the request - // headers is to specify a canned ACL with the x-amz-acl request header. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL - // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html - ACL types.ObjectCannedACL - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // - // General purpose buckets - Setting this header to true causes Amazon S3 to use - // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this - // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. - // - // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT - // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't - // supported, when you copy SSE-KMS encrypted objects from general purpose buckets - // to directory buckets, from directory buckets to general purpose buckets, or - // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a - // call to KMS every time a copy request is made for a KMS-encrypted object. - // - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // Indicates the checksum type that you want Amazon S3 to use to calculate the - // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Specifies presentational information for the object. - ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - // - // For directory buckets, only the aws-chunked value is supported in this header - // field. - ContentEncoding *string - - // The language that the content is in. - ContentLanguage *string - - // A standard MIME type describing the format of the object data. - ContentType *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The date and time at which the object is no longer cacheable. - Expires *time.Time - - // Specify access permissions explicitly to give the grantee READ, READ_ACP, and - // WRITE_ACP permissions on the object. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. - // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantFullControl *string - - // Specify access permissions explicitly to allow grantee to read the object data - // and its metadata. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. - // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantRead *string - - // Specify access permissions explicitly to allows grantee to read the object ACL. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. - // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantReadACP *string - - // Specify access permissions explicitly to allows grantee to allow grantee to - // write the ACL for the applicable object. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. - // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantWriteACP *string - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Specifies whether you want to apply a legal hold to the uploaded object. - // - // This functionality is not supported for directory buckets. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // Specifies the Object Lock mode that you want to apply to the uploaded object. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // Specifies the date and time when you want the Object Lock to expire. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the customer-provided encryption key - // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a Base64 encoded string of a UTF-8 - // encoded JSON, which contains the encryption context as key-value pairs. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - SSEKMSEncryptionContext *string - - // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object - // encryption. If the KMS key doesn't exist in the same account that's issuing the - // command, you must use the full Key ARN not the Key ID. - // - // General purpose buckets - If you specify x-amz-server-side-encryption with - // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key - // Alias) of the KMS key to use. If you specify - // x-amz-server-side-encryption:aws:kms or - // x-amz-server-side-encryption:aws:kms:dsse , but do not provide - // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web - // Services managed key ( aws/s3 ) to protect the data. - // - // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify - // the x-amz-server-side-encryption header to aws:kms . Then, the - // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's - // default KMS customer managed key ID. If you want to explicitly set the - // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's - // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS - // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 - // ) isn't supported. - // - // Incorrect key specification results in an HTTP 400 Bad Request error. - // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // - Directory buckets - For directory buckets, there are only two supported - // options for server-side encryption: server-side encryption with Amazon S3 - // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys - // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses - // the desired encryption configuration and you don't override the bucket default - // encryption in your CreateSession requests or PUT object requests. Then, new - // objects are automatically encrypted with the desired encryption settings. For - // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about - // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. - // - // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the - // encryption request headers must match the encryption settings that are specified - // in the CreateSession request. You can't override the values of the encryption - // settings ( x-amz-server-side-encryption , - // x-amz-server-side-encryption-aws-kms-key-id , - // x-amz-server-side-encryption-context , and - // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the - // CreateSession request. You don't need to explicitly specify these encryption - // settings values in Zonal endpoint API calls, and Amazon S3 will use the - // encryption settings values from the CreateSession request to protect new - // objects in the directory bucket. - // - // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the - // session token refreshes automatically to avoid service interruptions when a - // session expires. The CLI or the Amazon Web Services SDKs use the bucket's - // default encryption configuration for the CreateSession request. It's not - // supported to override the encryption settings values in the CreateSession - // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption - // request headers must match the default encryption configuration of the directory - // bucket. - // - // - S3 access points for Amazon FSx - When accessing data stored in Amazon FSx - // file systems using S3 access points, the only valid server side encryption - // option is aws:fsx . All Amazon FSx file systems have encryption configured by - // default and are encrypted at rest. Data is automatically encrypted before being - // written to the file system, and automatically decrypted as it is read. These - // processes are handled transparently by Amazon FSx. - // - // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - ServerSideEncryption types.ServerSideEncryption - - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high - // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. - // - // - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone - // storage class) in Availability Zones and ONEZONE_IA (the S3 One - // Zone-Infrequent Access storage class) in Dedicated Local Zones. - // - // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - // - // This functionality is not supported for directory buckets. - Tagging *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. - // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - noSmithyDocumentSerde -} - -func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type CreateMultipartUploadOutput struct { - - // If the bucket has a lifecycle rule configured with an action to abort - // incomplete multipart uploads and the prefix in the lifecycle rule matches the - // object name in the request, the response includes this header. The header - // indicates when the initiated multipart upload becomes eligible for an abort - // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. - // - // The response also includes the x-amz-abort-rule-id header that provides the ID - // of the lifecycle configuration rule that defines the abort action. - // - // This functionality is not supported for directory buckets. - // - // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config - AbortDate *time.Time - - // This header is returned along with the x-amz-abort-date header. It identifies - // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - // - // This functionality is not supported for directory buckets. - AbortRuleId *string - - // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. - // - // Access points are not supported by directory buckets. - Bucket *string - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm types.ChecksumAlgorithm - - // Indicates the checksum type that you want Amazon S3 to use to calculate the - // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Object key for which the multipart upload was initiated. - Key *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded string of a - // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. - SSEKMSEncryptionContext *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // ID for the initiated multipart upload. - UploadId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateMultipartUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateMultipartUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMultipartUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateMultipartUploadInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateMultipartUpload", - } -} - -// getCreateMultipartUploadBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getCreateMultipartUploadBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateMultipartUploadInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateMultipartUploadBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go deleted file mode 100644 index 2ed480363d2f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go +++ /dev/null @@ -1,475 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates a session that establishes temporary security credentials to support -// fast authentication and authorization for the Zonal endpoint API operations on -// directory buckets. For more information about Zonal endpoint API operations that -// include the Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 -// User Guide. -// -// To make Zonal endpoint API requests on a directory bucket, use the CreateSession -// API operation. Specifically, you grant s3express:CreateSession permission to a -// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM -// credentials to make the CreateSession API request on the bucket, which returns -// temporary security credentials that include the access key ID, secret access -// key, session token, and expiration. These credentials have associated -// permissions to access the Zonal endpoint API operations. After the session is -// created, you don’t need to use other policies to grant permissions to each Zonal -// endpoint API individually. Instead, in your Zonal endpoint API requests, you -// sign your requests by applying the temporary security credentials of the session -// to the request headers and following the SigV4 protocol for authentication. You -// also apply the session token to the x-amz-s3session-token request header for -// authorization. Temporary security credentials are scoped to the bucket and -// expire after 5 minutes. After the expiration time, any calls that you make with -// those credentials will fail. You must use IAM credentials again to make a -// CreateSession API request that generates a new set of temporary credentials for -// use. Temporary credentials cannot be extended or refreshed beyond the original -// specified interval. -// -// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes -// automatically to avoid service interruptions when a session expires. We -// recommend that you use the Amazon Web Services SDKs to initiate and manage -// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns]in the Amazon S3 -// User Guide. -// -// - You must make requests for this API operation to the Zonal endpoint. These -// endpoints support virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style -// requests are not supported. For more information about endpoints in Availability -// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints -// in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// - CopyObject API operation - Unlike other Zonal endpoint API operations, the -// CopyObject API operation doesn't use the temporary security credentials -// returned from the CreateSession API operation for authentication and -// authorization. For information about authentication and authorization of the -// CopyObject API operation on directory buckets, see [CopyObject]. -// -// - HeadBucket API operation - Unlike other Zonal endpoint API operations, the -// HeadBucket API operation doesn't use the temporary security credentials -// returned from the CreateSession API operation for authentication and -// authorization. For information about authentication and authorization of the -// HeadBucket API operation on directory buckets, see [HeadBucket]. -// -// Permissions To obtain temporary security credentials, you must create a bucket -// policy or an IAM identity-based policy that grants s3express:CreateSession -// permission to the bucket. In a policy, you can have the s3express:SessionMode -// condition key to control who can create a ReadWrite or ReadOnly session. For -// more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode] -// x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 -// User Guide. -// -// To grant cross-account access to Zonal endpoint API operations, the bucket -// policy should also grant both accounts the s3express:CreateSession permission. -// -// If you want to encrypt objects with SSE-KMS, you must also have the -// kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based -// policies and KMS key policies for the target KMS key. -// -// Encryption For directory buckets, there are only two supported options for -// server-side encryption: server-side encryption with Amazon S3 managed keys -// (SSE-S3) ( AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms -// ). We recommend that the bucket's default encryption uses the desired encryption -// configuration and you don't override the bucket default encryption in your -// CreateSession requests or PUT object requests. Then, new objects are -// automatically encrypted with the desired encryption settings. For more -// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the -// encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. -// -// For [Zonal endpoint (object-level) API operations] except [CopyObject] and [UploadPartCopy], you authenticate and authorize requests through [CreateSession] for low -// latency. To encrypt new objects in a directory bucket with SSE-KMS, you must -// specify SSE-KMS as the directory bucket's default encryption configuration with -// a KMS key (specifically, a [customer managed key]). Then, when a session is created for Zonal -// endpoint API operations, new objects are automatically encrypted and decrypted -// with SSE-KMS and S3 Bucket Keys during the session. -// -// Only 1 [customer managed key] is supported per directory bucket for the lifetime of the bucket. The [Amazon Web Services managed key] ( -// aws/s3 ) isn't supported. After you specify SSE-KMS as your bucket's default -// encryption configuration with a customer managed key, you can't change the -// customer managed key for the bucket's SSE-KMS configuration. -// -// In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, you can't -// override the values of the encryption settings ( x-amz-server-side-encryption , -// x-amz-server-side-encryption-aws-kms-key-id , -// x-amz-server-side-encryption-context , and -// x-amz-server-side-encryption-bucket-key-enabled ) from the CreateSession -// request. You don't need to explicitly specify these encryption settings values -// in Zonal endpoint API calls, and Amazon S3 will use the encryption settings -// values from the CreateSession request to protect new objects in the directory -// bucket. -// -// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the -// session token refreshes automatically to avoid service interruptions when a -// session expires. The CLI or the Amazon Web Services SDKs use the bucket's -// default encryption configuration for the CreateSession request. It's not -// supported to override the encryption settings values in the CreateSession -// request. Also, in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), it's not -// supported to override the values of the encryption settings from the -// CreateSession request. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html -// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters -// [Zonal endpoint (object-level) API operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-differences.html#s3-express-differences-api-operations -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) { - if params == nil { - params = &CreateSessionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateSession", params, optFns, c.addOperationCreateSessionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateSessionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateSessionInput struct { - - // The name of the bucket that you create a session for. - // - // This member is required. - Bucket *string - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using KMS keys (SSE-KMS). - // - // S3 Bucket Keys are always enabled for GET and PUT operations in a directory - // bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy - // SSE-KMS encrypted objects from general purpose buckets to directory buckets, - // from directory buckets to general purpose buckets, or between directory buckets, - // through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a - // copy request is made for a KMS-encrypted object. - // - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - // Specifies the Amazon Web Services KMS Encryption Context as an additional - // encryption context to use for object encryption. The value of this header is a - // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption - // context as key-value pairs. This value is stored as object metadata and - // automatically gets passed on to Amazon Web Services KMS for future GetObject - // operations on this object. - // - // General purpose buckets - This value must be explicitly added during CopyObject - // operations if you want an additional encryption context for your object. For - // more information, see [Encryption context]in the Amazon S3 User Guide. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - // - // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context - SSEKMSEncryptionContext *string - - // If you specify x-amz-server-side-encryption with aws:kms , you must specify the - // x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key - // ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you - // get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key - // alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist - // in the same account that't issuing the command, you must use the full Key ARN - // not the Key ID. - // - // Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket's lifetime. - // The [Amazon Web Services managed key]( aws/s3 ) isn't supported. - // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm to use when you store objects in the - // directory bucket. - // - // For directory buckets, there are only two supported options for server-side - // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 - // ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). By default, - // Amazon S3 encrypts data with SSE-S3. For more information, see [Protecting data with server-side encryption]in the Amazon S3 - // User Guide. - // - // S3 access points for Amazon FSx - When accessing data stored in Amazon FSx file - // systems using S3 access points, the only valid server side encryption option is - // aws:fsx . All Amazon FSx file systems have encryption configured by default and - // are encrypted at rest. Data is automatically encrypted before being written to - // the file system, and automatically decrypted as it is read. These processes are - // handled transparently by Amazon FSx. - // - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html - ServerSideEncryption types.ServerSideEncryption - - // Specifies the mode of the session that will be created, either ReadWrite or - // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is - // capable of executing all the Zonal endpoint API operations on a directory - // bucket. A ReadOnly session is constrained to execute the following Zonal - // endpoint API operations: GetObject , HeadObject , ListObjectsV2 , - // GetObjectAttributes , ListParts , and ListMultipartUploads . - SessionMode types.SessionMode - - noSmithyDocumentSerde -} - -func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.DisableS3ExpressSessionAuth = ptr.Bool(true) -} - -type CreateSessionOutput struct { - - // The established temporary security credentials for the created session. - // - // This member is required. - Credentials *types.SessionCredentials - - // Indicates whether to use an S3 Bucket Key for server-side encryption with KMS - // keys (SSE-KMS). - BucketKeyEnabled *bool - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded string of a - // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. - // This value is stored as object metadata and automatically gets passed on to - // Amazon Web Services KMS for future GetObject operations on this object. - SSEKMSEncryptionContext *string - - // If you specify x-amz-server-side-encryption with aws:kms , this header indicates - // the ID of the KMS symmetric encryption customer managed key that was used for - // object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store objects in the - // directory bucket. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateSession{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateSession{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSession"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCreateSessionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSession(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateSessionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateSessionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCreateSession(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateSession", - } -} - -// getCreateSessionBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getCreateSessionBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateSessionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateSessionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateSessionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go deleted file mode 100644 index dda0b97174a3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go +++ /dev/null @@ -1,336 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the S3 bucket. All objects (including all object versions and delete -// markers) in the bucket must be deleted before the bucket itself can be deleted. -// -// - Directory buckets - If multipart uploads in a directory bucket are in -// progress, you can't delete the bucket until all the in-progress multipart -// uploads are aborted or completed. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Regional endpoint. These endpoints support path-style -// requests in the format -// https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - You must have the s3:DeleteBucket -// permission on the specified bucket in a policy. -// -// - Directory bucket permissions - You must have the s3express:DeleteBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the -// Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to DeleteBucket : -// -// [CreateBucket] -// -// [DeleteObject] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { - if params == nil { - params = &DeleteBucketInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucket", params, optFns, c.addOperationDeleteBucketMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketInput struct { - - // Specifies the bucket being deleted. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucket{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucket"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucket(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucket", - } -} - -// getDeleteBucketBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteBucketBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: false, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignDeleteBucket is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignDeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &DeleteBucketInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "DeleteBucket", params, clientOptFns, - c.client.addOperationDeleteBucketMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addDeleteBucketPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addDeleteBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go deleted file mode 100644 index f1b1e2149534..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ /dev/null @@ -1,280 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes an analytics configuration for the bucket (specified by the analytics -// configuration ID). -// -// To use this operation, you must have permissions to perform the -// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. -// -// The following operations are related to DeleteBucketAnalyticsConfiguration : -// -// [GetBucketAnalyticsConfiguration] -// -// [ListBucketAnalyticsConfigurations] -// -// [PutBucketAnalyticsConfiguration] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html -// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html -// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketAnalyticsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketAnalyticsConfiguration", params, optFns, c.addOperationDeleteBucketAnalyticsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketAnalyticsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketAnalyticsConfigurationInput struct { - - // The name of the bucket from which an analytics configuration is deleted. - // - // This member is required. - Bucket *string - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketAnalyticsConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketAnalyticsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketAnalyticsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketAnalyticsConfiguration", - } -} - -// getDeleteBucketAnalyticsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketAnalyticsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketAnalyticsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go deleted file mode 100644 index d45bed38b6f4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go +++ /dev/null @@ -1,268 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the cors configuration information set for the bucket. -// -// To use this operation, you must have permission to perform the s3:PutBucketCORS -// action. The bucket owner has this permission by default and can grant this -// permission to others. -// -// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. -// -// # Related Resources -// -// [PutBucketCors] -// -// [RESTOPTIONSobject] -// -// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html -func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { - if params == nil { - params = &DeleteBucketCorsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketCors", params, optFns, c.addOperationDeleteBucketCorsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketCorsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketCorsInput struct { - - // Specifies the bucket whose cors configuration is being deleted. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketCorsOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketCors{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketCors"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketCors(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketCorsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketCors", - } -} - -// getDeleteBucketCorsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteBucketCorsBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketCorsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketCorsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go deleted file mode 100644 index 310e48b31ccb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This implementation of the DELETE action resets the default encryption for the -// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). -// -// - General purpose buckets - For information about the bucket default -// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: SSE-S3 and SSE-KMS. For information about -// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. -// -// Permissions -// -// - General purpose bucket permissions - The s3:PutEncryptionConfiguration -// permission is required in a policy. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:PutEncryptionConfiguration permission in an IAM -// identity-based policy instead of a bucket policy. Cross-account access to this -// API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to DeleteBucketEncryption : -// -// [PutBucketEncryption] -// -// [GetBucketEncryption] -// -// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html -// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html -// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html -// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { - if params == nil { - params = &DeleteBucketEncryptionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketEncryption", params, optFns, c.addOperationDeleteBucketEncryptionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketEncryptionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketEncryptionInput struct { - - // The name of the bucket containing the server-side encryption configuration to - // delete. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketEncryptionOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketEncryption{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketEncryption"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketEncryption(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketEncryptionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketEncryption", - } -} - -// getDeleteBucketEncryptionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketEncryptionBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketEncryptionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketEncryptionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go deleted file mode 100644 index b6fdc8d4aa6f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ /dev/null @@ -1,287 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// Operations related to DeleteBucketIntelligentTieringConfiguration include: -// -// [GetBucketIntelligentTieringConfiguration] -// -// [PutBucketIntelligentTieringConfiguration] -// -// [ListBucketIntelligentTieringConfigurations] -// -// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html -// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html -// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketIntelligentTieringConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketIntelligentTieringConfiguration", params, optFns, c.addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketIntelligentTieringConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketIntelligentTieringConfigurationInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketIntelligentTieringConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketIntelligentTieringConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketIntelligentTieringConfiguration", - } -} - -// getDeleteBucketIntelligentTieringConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getDeleteBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketIntelligentTieringConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketIntelligentTieringConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go deleted file mode 100644 index 40ce0377149a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ /dev/null @@ -1,280 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes an S3 Inventory configuration (identified by the inventory ID) from the -// bucket. -// -// To use this operation, you must have permissions to perform the -// s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. -// -// Operations related to DeleteBucketInventoryConfiguration include: -// -// [GetBucketInventoryConfiguration] -// -// [PutBucketInventoryConfiguration] -// -// [ListBucketInventoryConfigurations] -// -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html -// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html -func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketInventoryConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketInventoryConfiguration", params, optFns, c.addOperationDeleteBucketInventoryConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketInventoryConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketInventoryConfigurationInput struct { - - // The name of the bucket containing the inventory configuration to delete. - // - // This member is required. - Bucket *string - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketInventoryConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketInventoryConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketInventoryConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketInventoryConfiguration", - } -} - -// getDeleteBucketInventoryConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketInventoryConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketInventoryConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go deleted file mode 100644 index 1871b2688724..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go +++ /dev/null @@ -1,307 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the lifecycle configuration from the specified bucket. Amazon S3 -// removes all the lifecycle configuration rules in the lifecycle subresource -// associated with the bucket. Your objects never expire, and Amazon S3 no longer -// automatically deletes any objects on the basis of rules contained in the deleted -// lifecycle configuration. -// -// Permissions -// - General purpose bucket permissions - By default, all Amazon S3 resources -// are private, including buckets, objects, and related subresources (for example, -// lifecycle configuration and website configuration). Only the resource owner -// (that is, the Amazon Web Services account that created it) can access the -// resource. The resource owner can optionally grant access permissions to others -// by writing an access policy. For this operation, a user must have the -// s3:PutLifecycleConfiguration permission. -// -// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - You must have the -// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy -// to use this operation. Cross-account access to this API operation isn't -// supported. The resource owner can optionally grant access permissions to others -// by creating a role or user for them as long as they are within the same account -// as the owner and resource. -// -// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in -// -// the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name -// . Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// For more information about the object expiration, see [Elements to Describe Lifecycle Actions]. -// -// Related actions include: -// -// [PutBucketLifecycleConfiguration] -// -// [GetBucketLifecycleConfiguration] -// -// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html -// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions -// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html -// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { - if params == nil { - params = &DeleteBucketLifecycleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketLifecycle", params, optFns, c.addOperationDeleteBucketLifecycleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketLifecycleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketLifecycleInput struct { - - // The bucket name of the lifecycle to delete. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketLifecycleOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketLifecycle{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketLifecycle"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketLifecycle(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketLifecycleInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketLifecycle", - } -} - -// getDeleteBucketLifecycleBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketLifecycleBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketLifecycleInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketLifecycleUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketLifecycleBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataConfiguration.go deleted file mode 100644 index 1fa3599ee362..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataConfiguration.go +++ /dev/null @@ -1,280 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes an S3 Metadata configuration from a general purpose bucket. For more -// -// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// You can use the V2 DeleteBucketMetadataConfiguration API operation with V1 or -// V2 metadata configurations. However, if you try to use the V1 -// DeleteBucketMetadataTableConfiguration API operation with V2 configurations, you -// will receive an HTTP 405 Method Not Allowed error. -// -// Permissions To use this operation, you must have the -// s3:DeleteBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] -// in the Amazon S3 User Guide. -// -// The IAM policy action name is the same for the V1 and V2 API operations. -// -// The following operations are related to DeleteBucketMetadataConfiguration : -// -// [CreateBucketMetadataConfiguration] -// -// [GetBucketMetadataConfiguration] -// -// [UpdateBucketMetadataInventoryTableConfiguration] -// -// [UpdateBucketMetadataJournalTableConfiguration] -// -// [GetBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataConfiguration.html -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [UpdateBucketMetadataJournalTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataJournalTableConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -// [UpdateBucketMetadataInventoryTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataInventoryTableConfiguration.html -func (c *Client) DeleteBucketMetadataConfiguration(ctx context.Context, params *DeleteBucketMetadataConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetadataConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketMetadataConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetadataConfiguration", params, optFns, c.addOperationDeleteBucketMetadataConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketMetadataConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketMetadataConfigurationInput struct { - - // The general purpose bucket that you want to remove the metadata configuration - // from. - // - // This member is required. - Bucket *string - - // The expected bucket owner of the general purpose bucket that you want to - // remove the metadata table configuration from. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketMetadataConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketMetadataConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketMetadataConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetadataConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetadataConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetadataConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketMetadataConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetadataConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketMetadataConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketMetadataConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketMetadataConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketMetadataConfiguration", - } -} - -// getDeleteBucketMetadataConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketMetadataConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketMetadataConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketMetadataConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketMetadataConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go deleted file mode 100644 index 1ef17a359554..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go +++ /dev/null @@ -1,287 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// We recommend that you delete your S3 Metadata configurations by using the V2 [DeleteBucketMetadataTableConfiguration] -// -// API operation. We no longer recommend using the V1 -// DeleteBucketMetadataTableConfiguration API operation. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// Deletes a V1 S3 Metadata configuration from a general purpose bucket. For more -// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// You can use the V2 DeleteBucketMetadataConfiguration API operation with V1 or -// V2 metadata table configurations. However, if you try to use the V1 -// DeleteBucketMetadataTableConfiguration API operation with V2 configurations, you -// will receive an HTTP 405 Method Not Allowed error. -// -// Make sure that you update your processes to use the new V2 API operations ( -// CreateBucketMetadataConfiguration , GetBucketMetadataConfiguration , and -// DeleteBucketMetadataConfiguration ) instead of the V1 API operations. -// -// Permissions To use this operation, you must have the -// s3:DeleteBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] -// in the Amazon S3 User Guide. -// -// The following operations are related to DeleteBucketMetadataTableConfiguration : -// -// [CreateBucketMetadataTableConfiguration] -// -// [GetBucketMetadataTableConfiguration] -// -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html -// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// -// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html -func (c *Client) DeleteBucketMetadataTableConfiguration(ctx context.Context, params *DeleteBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetadataTableConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketMetadataTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetadataTableConfiguration", params, optFns, c.addOperationDeleteBucketMetadataTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketMetadataTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketMetadataTableConfigurationInput struct { - - // The general purpose bucket that you want to remove the metadata table - // configuration from. - // - // This member is required. - Bucket *string - - // The expected bucket owner of the general purpose bucket that you want to - // remove the metadata table configuration from. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketMetadataTableConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetadataTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketMetadataTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketMetadataTableConfiguration", - } -} - -// getDeleteBucketMetadataTableConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getDeleteBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketMetadataTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketMetadataTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go deleted file mode 100644 index 68e6a5b73716..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes a metrics configuration for the Amazon CloudWatch request metrics -// (specified by the metrics configuration ID) from the bucket. Note that this -// doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the -// s3:PutMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. -// -// The following operations are related to DeleteBucketMetricsConfiguration : -// -// [GetBucketMetricsConfiguration] -// -// [PutBucketMetricsConfiguration] -// -// [ListBucketMetricsConfigurations] -// -// [Monitoring Metrics with Amazon CloudWatch] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html -// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketMetricsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetricsConfiguration", params, optFns, c.addOperationDeleteBucketMetricsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketMetricsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketMetricsConfigurationInput struct { - - // The name of the bucket containing the metrics configuration to delete. - // - // This member is required. - Bucket *string - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketMetricsConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetricsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketMetricsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketMetricsConfiguration", - } -} - -// getDeleteBucketMetricsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketMetricsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketMetricsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go deleted file mode 100644 index 341f123cf946..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go +++ /dev/null @@ -1,265 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:PutBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see [Specifying Permissions in a Policy]. -// -// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. -// -// The following operations are related to DeleteBucketOwnershipControls : -// -// # GetBucketOwnershipControls -// -// # PutBucketOwnershipControls -// -// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { - if params == nil { - params = &DeleteBucketOwnershipControlsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketOwnershipControls", params, optFns, c.addOperationDeleteBucketOwnershipControlsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketOwnershipControlsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketOwnershipControlsInput struct { - - // The Amazon S3 bucket whose OwnershipControls you want to delete. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketOwnershipControlsOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketOwnershipControls"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketOwnershipControlsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketOwnershipControls", - } -} - -// getDeleteBucketOwnershipControlsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketOwnershipControlsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketOwnershipControlsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go deleted file mode 100644 index 564fba34f6c4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go +++ /dev/null @@ -1,317 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the policy of a specified bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions If you are using an identity other than the root user of the Amazon -// Web Services account that owns the bucket, the calling identity must both have -// the DeleteBucketPolicy permissions on the specified bucket and belong to the -// bucket owner's account in order to use this operation. -// -// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 -// Access Denied error. If you have the correct permissions, but you're not using -// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. -// -// To ensure that bucket owners don't inadvertently lock themselves out of their -// own buckets, the root principal in a bucket owner's Amazon Web Services account -// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API -// actions, even if their bucket policy explicitly denies the root principal's -// access. Bucket owner root principals can only be blocked from performing these -// API actions by VPC endpoint policies and Amazon Web Services Organizations -// policies. -// -// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is -// required in a policy. For more information about general purpose buckets bucket -// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based -// policy instead of a bucket policy. Cross-account access to this API operation -// isn't supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// # The following operations are related to DeleteBucketPolicy -// -// [CreateBucket] -// -// [DeleteObject] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { - if params == nil { - params = &DeleteBucketPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketPolicy", params, optFns, c.addOperationDeleteBucketPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketPolicyInput struct { - - // The bucket name. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketPolicyOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketPolicyInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketPolicy", - } -} - -// getDeleteBucketPolicyBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketPolicyBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketPolicyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketPolicyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go deleted file mode 100644 index 1fa0f46986fc..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go +++ /dev/null @@ -1,275 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the replication configuration from the bucket. -// -// To use this operation, you must have permissions to perform the -// s3:PutReplicationConfiguration action. The bucket owner has these permissions by -// default and can grant it to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] -// and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// It can take a while for the deletion of a replication configuration to fully -// propagate. -// -// For information about replication configuration, see [Replication] in the Amazon S3 User -// Guide. -// -// The following operations are related to DeleteBucketReplication : -// -// [PutBucketReplication] -// -// [GetBucketReplication] -// -// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html -// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { - if params == nil { - params = &DeleteBucketReplicationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketReplication", params, optFns, c.addOperationDeleteBucketReplicationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketReplicationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketReplicationInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketReplicationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketReplication{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketReplication"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketReplication(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketReplicationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketReplication", - } -} - -// getDeleteBucketReplicationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketReplicationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketReplicationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketReplicationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go deleted file mode 100644 index 9d678376c590..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go +++ /dev/null @@ -1,265 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the tags from the bucket. -// -// To use this operation, you must have permission to perform the -// s3:PutBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. -// -// The following operations are related to DeleteBucketTagging : -// -// [GetBucketTagging] -// -// [PutBucketTagging] -// -// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html -// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html -func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { - if params == nil { - params = &DeleteBucketTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketTagging", params, optFns, c.addOperationDeleteBucketTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketTaggingInput struct { - - // The bucket that has the tag set to be removed. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketTaggingOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketTagging", - } -} - -// getDeleteBucketTaggingBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go deleted file mode 100644 index bd0eadf38d36..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go +++ /dev/null @@ -1,274 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This action removes the website configuration for a bucket. Amazon S3 returns a -// 200 OK response upon successfully deleting a website configuration on the -// specified bucket. You will get a 200 OK response if the website configuration -// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 -// response if the bucket specified in the request does not exist. -// -// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, -// only the bucket owner can delete the website configuration attached to a bucket. -// However, bucket owners can grant other users permission to delete the website -// configuration by writing a bucket policy granting them the -// S3:DeleteBucketWebsite permission. -// -// For more information about hosting websites, see [Hosting Websites on Amazon S3]. -// -// The following operations are related to DeleteBucketWebsite : -// -// [GetBucketWebsite] -// -// [PutBucketWebsite] -// -// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html -// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html -// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html -func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { - if params == nil { - params = &DeleteBucketWebsiteInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketWebsite", params, optFns, c.addOperationDeleteBucketWebsiteMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketWebsiteOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketWebsiteInput struct { - - // The bucket name for which you want to remove the website configuration. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketWebsiteOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketWebsite{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketWebsite"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketWebsite(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketWebsiteInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketWebsite", - } -} - -// getDeleteBucketWebsiteBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketWebsiteBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketWebsiteInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketWebsiteBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go deleted file mode 100644 index 454f15b07a41..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ /dev/null @@ -1,502 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Removes an object from a bucket. The behavior depends on the bucket's -// versioning state: -// -// - If bucket versioning is not enabled, the operation permanently deletes the -// object. -// -// - If bucket versioning is enabled, the operation inserts a delete marker, -// which becomes the current version of the object. To permanently delete an object -// in a versioned bucket, you must include the object’s versionId in the request. -// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket]. -// -// - If bucket versioning is suspended, the operation removes the object that -// has a null versionId , if there is one, and inserts a delete marker that -// becomes the current version of the object. If there isn't an object with a null -// versionId , and all versions of the object have a versionId , Amazon S3 does -// not remove the object and only inserts a delete marker. To permanently delete an -// object that has a versionId , you must include the object’s versionId in the -// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets]. -// -// - Directory buckets - S3 Versioning isn't enabled and supported for directory -// buckets. For this API operation, only the null value of the version ID is -// supported by directory buckets. You can only specify null to the versionId -// query parameter in the request. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// To remove a specific version, you must use the versionId query parameter. Using -// this query parameter permanently deletes the version. If the object deleted is a -// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. -// -// If the object you want to delete is in a bucket where the bucket versioning -// configuration is MFA Delete enabled, you must include the x-amz-mfa request -// header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User -// Guide. To see sample requests that use versioning, see [Sample Request]. -// -// Directory buckets - MFA delete is not supported by directory buckets. -// -// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle] ) to -// enable Amazon S3 to remove them for you. If you want to block users or accounts -// from removing or deleting objects from your bucket, you must deny them the -// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration -// actions. -// -// Directory buckets - S3 Lifecycle is not supported by directory buckets. -// -// Permissions -// -// - General purpose bucket permissions - The following permissions are required -// in your policies when your DeleteObjects request includes specific headers. -// -// - s3:DeleteObject - To delete an object from a bucket, you must always have -// the s3:DeleteObject permission. -// -// - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following action is related to DeleteObject : -// -// [PutObject] -// -// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html -func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { - if params == nil { - params = &DeleteObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteObject", params, optFns, c.addOperationDeleteObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteObjectInput struct { - - // The bucket name of the bucket containing the object. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key name of the object to delete. - // - // This member is required. - Key *string - - // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to - // process this operation. To use this header, you must have the - // s3:BypassGovernanceRetention permission. - // - // This functionality is not supported for directory buckets. - BypassGovernanceRetention *bool - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The If-Match header field makes the request method conditional on ETags. If the - // ETag value does not match, the operation returns a 412 Precondition Failed - // error. If the ETag matches or if the object doesn't exist, the operation will - // return a 204 Success (No Content) response . - // - // For more information about conditional requests, see [RFC 7232]. - // - // This functionality is only supported for directory buckets. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // If present, the object is deleted only if its modification times matches the - // provided Timestamp . If the Timestamp values do not match, the operation - // returns a 412 Precondition Failed error. If the Timestamp matches or if the - // object doesn’t exist, the operation returns a 204 Success (No Content) response. - // - // This functionality is only supported for directory buckets. - IfMatchLastModifiedTime *time.Time - - // If present, the object is deleted only if its size matches the provided size in - // bytes. If the Size value does not match, the operation returns a 412 - // Precondition Failed error. If the Size matches or if the object doesn’t exist, - // the operation returns a 204 Success (No Content) response. - // - // This functionality is only supported for directory buckets. - // - // You can use the If-Match , x-amz-if-match-last-modified-time and - // x-amz-if-match-size conditional headers in conjunction with each-other or - // individually. - IfMatchSize *int64 - - // The concatenation of the authentication device's serial number, a space, and - // the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - // - // This functionality is not supported for directory buckets. - MFA *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Version ID used to reference a specific version of the object. - // - // For directory buckets in this API operation, only the null value of the version - // ID is supported. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type DeleteObjectOutput struct { - - // Indicates whether the specified object version that was permanently deleted was - // (true) or was not (false) a delete marker before deletion. In a simple DELETE, - // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. - // - // This functionality is not supported for directory buckets. - // - // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html - DeleteMarker *bool - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteObject", - } -} - -// getDeleteObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignDeleteObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignDeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &DeleteObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "DeleteObject", params, clientOptFns, - c.client.addOperationDeleteObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addDeleteObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addDeleteObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go deleted file mode 100644 index f105f09628f1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go +++ /dev/null @@ -1,302 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Removes the entire tag set from the specified object. For more information -// about managing object tags, see [Object Tagging]. -// -// To use this operation, you must have permission to perform the -// s3:DeleteObjectTagging action. -// -// To delete tags of a specific object version, add the versionId query parameter -// in the request. You will need permission for the s3:DeleteObjectVersionTagging -// action. -// -// The following operations are related to DeleteObjectTagging : -// -// [PutObjectTagging] -// -// [GetObjectTagging] -// -// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html -// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html -// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html -func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { - if params == nil { - params = &DeleteObjectTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteObjectTagging", params, optFns, c.addOperationDeleteObjectTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteObjectTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteObjectTaggingInput struct { - - // The bucket name containing the objects from which to remove the tags. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key that identifies the object in the bucket from which to remove all tags. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The versionId of the object that the tag-set will be removed from. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type DeleteObjectTaggingOutput struct { - - // The versionId of the object the tag-set was removed from. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjectTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjectTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjectTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteObjectTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteObjectTagging", - } -} - -// getDeleteObjectTaggingBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteObjectTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteObjectTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteObjectTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go deleted file mode 100644 index 53424ad6a49e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ /dev/null @@ -1,504 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation enables you to delete multiple objects from a bucket using a -// single HTTP request. If you know the object keys that you want to delete, then -// this operation provides a suitable alternative to sending individual delete -// requests, reducing per-request overhead. -// -// The request can contain a list of up to 1,000 keys that you want to delete. In -// the XML, you provide the object key names, and optionally, version IDs if you -// want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete operation and returns the -// result of that delete, success or failure, in the response. If the object -// specified in the request isn't found, Amazon S3 confirms the deletion by -// returning the result as deleted. -// -// - Directory buckets - S3 Versioning isn't enabled and supported for directory -// buckets. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// The operation supports two modes for the response: verbose and quiet. By -// default, the operation uses verbose mode in which the response includes the -// result of deletion of each key in your request. In quiet mode the response -// includes only keys where the delete operation encountered an error. For a -// successful deletion in a quiet mode, the operation does not return any -// information about the delete in the response body. -// -// When performing this action on an MFA Delete enabled bucket, that attempts to -// delete any versioned objects, you must include an MFA token. If you do not -// provide one, the entire request will fail, even if there are non-versioned -// objects you are trying to delete. If you provide an invalid token, whether there -// are versioned keys in the request or not, the entire Multi-Object Delete request -// will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide. -// -// Directory buckets - MFA delete is not supported by directory buckets. -// -// Permissions -// -// - General purpose bucket permissions - The following permissions are required -// in your policies when your DeleteObjects request includes specific headers. -// -// - s3:DeleteObject - To delete an object from a bucket, you must always specify -// the s3:DeleteObject permission. -// -// - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion -// permission. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Content-MD5 request header -// -// - General purpose bucket - The Content-MD5 request header is required for all -// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that -// your request body has not been altered in transit. -// -// - Directory bucket - The Content-MD5 request header or a additional checksum -// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , -// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all -// Multi-Object Delete requests. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to DeleteObjects : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [ListParts] -// -// [AbortMultipartUpload] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { - if params == nil { - params = &DeleteObjectsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteObjects", params, optFns, c.addOperationDeleteObjectsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteObjectsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteObjectsInput struct { - - // The bucket name containing the objects to delete. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Container for the request. - // - // This member is required. - Delete *types.Delete - - // Specifies whether you want to delete this object even if it has a - // Governance-type Object Lock in place. To use this header, you must have the - // s3:BypassGovernanceRetention permission. - // - // This functionality is not supported for directory buckets. - BypassGovernanceRetention *bool - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . - // - // For the x-amz-checksum-algorithm header, replace algorithm with the - // supported algorithm from the following list: - // - // - CRC32 - // - // - CRC32C - // - // - CRC64NVME - // - // - SHA1 - // - // - SHA256 - // - // For more information, see [Checking object integrity] in the Amazon S3 User Guide. - // - // If the individual checksum value you provide through x-amz-checksum-algorithm - // doesn't match the checksum algorithm you set through - // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest - // error. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The concatenation of the authentication device's serial number, a space, and - // the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - // - // When performing the DeleteObjects operation on an MFA delete enabled bucket, - // which attempts to delete the specified versioned objects, you must include an - // MFA token. If you don't provide an MFA token, the entire request will fail, even - // if there are non-versioned objects that you are trying to delete. If you provide - // an invalid token, whether there are versioned object keys in the request or not, - // the entire Multi-Object Delete request will fail. For information about MFA - // Delete, see [MFA Delete]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete - MFA *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type DeleteObjectsOutput struct { - - // Container element for a successful delete. It identifies the object that was - // successfully deleted. - Deleted []types.DeletedObject - - // Container for a failed delete action that describes the object that Amazon S3 - // attempted to delete and the error it encountered. - Errors []types.Error - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjects{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjects"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjects(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addDeleteObjectsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteObjectsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteObjects", - } -} - -// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*DeleteObjectsInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getDeleteObjectsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteObjectsBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteObjectsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteObjectsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go deleted file mode 100644 index 6096be7b784a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go +++ /dev/null @@ -1,271 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For -// more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// The following operations are related to DeletePublicAccessBlock : -// -// [Using Amazon S3 Block Public Access] -// -// [GetPublicAccessBlock] -// -// [PutPublicAccessBlock] -// -// [GetBucketPolicyStatus] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html -func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { - if params == nil { - params = &DeletePublicAccessBlockInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeletePublicAccessBlock", params, optFns, c.addOperationDeletePublicAccessBlockMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeletePublicAccessBlockOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeletePublicAccessBlockInput struct { - - // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeletePublicAccessBlockOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeletePublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePublicAccessBlock"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePublicAccessBlock(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeletePublicAccessBlockInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeletePublicAccessBlock", - } -} - -// getDeletePublicAccessBlockBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeletePublicAccessBlockBucketMember(input interface{}) (*string, bool) { - in := input.(*DeletePublicAccessBlockInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeletePublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeletePublicAccessBlockBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go deleted file mode 100644 index d905b6a7248b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This implementation of the GET action uses the accelerate subresource to return -// the Transfer Acceleration state of a bucket, which is either Enabled or -// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that -// enables you to perform faster data transfers to and from Amazon S3. -// -// To use this operation, you must have permission to perform the -// s3:GetAccelerateConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. -// -// You set the Transfer Acceleration state of an existing bucket to Enabled or -// Suspended by using the [PutBucketAccelerateConfiguration] operation. -// -// A GET accelerate request does not return a state value for a bucket that has no -// transfer acceleration state. A bucket has no Transfer Acceleration state if a -// state has never been set on the bucket. -// -// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User -// Guide. -// -// The following operations are related to GetBucketAccelerateConfiguration : -// -// [PutBucketAccelerateConfiguration] -// -// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { - if params == nil { - params = &GetBucketAccelerateConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketAccelerateConfiguration", params, optFns, c.addOperationGetBucketAccelerateConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketAccelerateConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketAccelerateConfigurationInput struct { - - // The name of the bucket for which the accelerate configuration is retrieved. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketAccelerateConfigurationOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // The accelerate configuration of the bucket. - Status types.BucketAccelerateStatus - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAccelerateConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketAccelerateConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketAccelerateConfiguration", - } -} - -// getGetBucketAccelerateConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketAccelerateConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketAccelerateConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go deleted file mode 100644 index 6fd57ef54d7c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation is not supported for directory buckets. -// -// This implementation of the GET action uses the acl subresource to return the -// access control list (ACL) of a bucket. To use GET to return the ACL of the -// bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission -// is granted to the anonymous user, you can return the ACL of the bucket without -// using an authorization header. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// requests to read ACLs are still supported and return the -// bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. -// -// The following operations are related to GetBucketAcl : -// -// [ListObjects] -// -// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { - if params == nil { - params = &GetBucketAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketAcl", params, optFns, c.addOperationGetBucketAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketAclInput struct { - - // Specifies the S3 bucket whose ACL is being requested. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketAclOutput struct { - - // A list of grants. - Grants []types.Grant - - // Container for the bucket owner's display name and ID. - Owner *types.Owner - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketAcl", - } -} - -// getGetBucketAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketAclBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go deleted file mode 100644 index 2d1f41fe7cf6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ /dev/null @@ -1,286 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This implementation of the GET action returns an analytics configuration -// (identified by the analytics configuration ID) from the bucket. -// -// To use this operation, you must have permissions to perform the -// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. -// -// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User -// Guide. -// -// The following operations are related to GetBucketAnalyticsConfiguration : -// -// [DeleteBucketAnalyticsConfiguration] -// -// [ListBucketAnalyticsConfigurations] -// -// [PutBucketAnalyticsConfiguration] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html -// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { - if params == nil { - params = &GetBucketAnalyticsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketAnalyticsConfiguration", params, optFns, c.addOperationGetBucketAnalyticsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketAnalyticsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketAnalyticsConfigurationInput struct { - - // The name of the bucket from which an analytics configuration is retrieved. - // - // This member is required. - Bucket *string - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketAnalyticsConfigurationOutput struct { - - // The configuration and any analyses for the analytics filter. - AnalyticsConfiguration *types.AnalyticsConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAnalyticsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketAnalyticsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketAnalyticsConfiguration", - } -} - -// getGetBucketAnalyticsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketAnalyticsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketAnalyticsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go deleted file mode 100644 index 0fa6c3eb3255..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the Cross-Origin Resource Sharing (CORS) configuration information set -// for the bucket. -// -// To use this operation, you must have permission to perform the s3:GetBucketCORS -// action. By default, the bucket owner has this permission and can grant it to -// others. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// For more information about CORS, see [Enabling Cross-Origin Resource Sharing]. -// -// The following operations are related to GetBucketCors : -// -// [PutBucketCors] -// -// [DeleteBucketCors] -// -// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html -func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { - if params == nil { - params = &GetBucketCorsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketCors", params, optFns, c.addOperationGetBucketCorsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketCorsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketCorsInput struct { - - // The bucket name for which to get the cors configuration. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketCorsOutput struct { - - // A set of origins and methods (cross-origin access that you want to allow). You - // can add up to 100 rules to the configuration. - CORSRules []types.CORSRule - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketCors{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketCors"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketCors(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketCorsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketCors", - } -} - -// getGetBucketCorsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketCorsBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketCorsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketCorsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go deleted file mode 100644 index 183bca49604a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go +++ /dev/null @@ -1,311 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the default encryption configuration for an Amazon S3 bucket. By -// default, all buckets have a default encryption configuration that uses -// server-side encryption with Amazon S3 managed keys (SSE-S3). -// -// - General purpose buckets - For information about the bucket default -// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: SSE-S3 and SSE-KMS. For information about -// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. -// -// Permissions -// -// - General purpose bucket permissions - The s3:GetEncryptionConfiguration -// permission is required in a policy. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:GetEncryptionConfiguration permission in an IAM -// identity-based policy instead of a bucket policy. Cross-account access to this -// API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to GetBucketEncryption : -// -// [PutBucketEncryption] -// -// [DeleteBucketEncryption] -// -// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html -// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html -// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html -// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { - if params == nil { - params = &GetBucketEncryptionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketEncryption", params, optFns, c.addOperationGetBucketEncryptionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketEncryptionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketEncryptionInput struct { - - // The name of the bucket from which the server-side encryption configuration is - // retrieved. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketEncryptionOutput struct { - - // Specifies the default server-side-encryption configuration. - ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketEncryption{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketEncryption"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketEncryption(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketEncryptionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketEncryption", - } -} - -// getGetBucketEncryptionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketEncryptionBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketEncryptionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketEncryptionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go deleted file mode 100644 index 29969eae580a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ /dev/null @@ -1,292 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// Operations related to GetBucketIntelligentTieringConfiguration include: -// -// [DeleteBucketIntelligentTieringConfiguration] -// -// [PutBucketIntelligentTieringConfiguration] -// -// [ListBucketIntelligentTieringConfigurations] -// -// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html -// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html -func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { - if params == nil { - params = &GetBucketIntelligentTieringConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketIntelligentTieringConfiguration", params, optFns, c.addOperationGetBucketIntelligentTieringConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketIntelligentTieringConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketIntelligentTieringConfigurationInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketIntelligentTieringConfigurationOutput struct { - - // Container for S3 Intelligent-Tiering configuration. - IntelligentTieringConfiguration *types.IntelligentTieringConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketIntelligentTieringConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketIntelligentTieringConfiguration", - } -} - -// getGetBucketIntelligentTieringConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getGetBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketIntelligentTieringConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketIntelligentTieringConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go deleted file mode 100644 index 2bfff901c373..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go +++ /dev/null @@ -1,285 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns an S3 Inventory configuration (identified by the inventory -// configuration ID) from the bucket. -// -// To use this operation, you must have permissions to perform the -// s3:GetInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. For more information about -// permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. -// -// The following operations are related to GetBucketInventoryConfiguration : -// -// [DeleteBucketInventoryConfiguration] -// -// [ListBucketInventoryConfigurations] -// -// [PutBucketInventoryConfiguration] -// -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html -func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { - if params == nil { - params = &GetBucketInventoryConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketInventoryConfiguration", params, optFns, c.addOperationGetBucketInventoryConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketInventoryConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketInventoryConfigurationInput struct { - - // The name of the bucket containing the inventory configuration to retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketInventoryConfigurationOutput struct { - - // Specifies the inventory configuration. - InventoryConfiguration *types.InventoryConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketInventoryConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketInventoryConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketInventoryConfiguration", - } -} - -// getGetBucketInventoryConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketInventoryConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketInventoryConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go deleted file mode 100644 index 89f2b1f2398a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ /dev/null @@ -1,351 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the lifecycle configuration information set on the bucket. For -// information about lifecycle configuration, see [Object Lifecycle Management]. -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, object size, or any -// combination of these. Accordingly, this section describes the latest API, which -// is compatible with the new functionality. The previous version of the API -// supported filtering based only on an object key name prefix, which is supported -// for general purpose buckets for backward compatibility. For the related API -// description, see [GetBucketLifecycle]. -// -// Lifecyle configurations for directory buckets only support expiring objects and -// cancelling multipart uploads. Expiring of versioned objects, transitions and tag -// filters are not supported. -// -// Permissions -// - General purpose bucket permissions - By default, all Amazon S3 resources -// are private, including buckets, objects, and related subresources (for example, -// lifecycle configuration and website configuration). Only the resource owner -// (that is, the Amazon Web Services account that created it) can access the -// resource. The resource owner can optionally grant access permissions to others -// by writing an access policy. For this operation, a user must have the -// s3:GetLifecycleConfiguration permission. -// -// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - You must have the -// s3express:GetLifecycleConfiguration permission in an IAM identity-based policy -// to use this operation. Cross-account access to this API operation isn't -// supported. The resource owner can optionally grant access permissions to others -// by creating a role or user for them as long as they are within the same account -// as the owner and resource. -// -// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in -// -// the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name -// . Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// GetBucketLifecycleConfiguration has the following special error: -// -// - Error code: NoSuchLifecycleConfiguration -// -// - Description: The lifecycle configuration does not exist. -// -// - HTTP Status Code: 404 Not Found -// -// - SOAP Fault Code Prefix: Client -// -// The following operations are related to GetBucketLifecycleConfiguration : -// -// [GetBucketLifecycle] -// -// [PutBucketLifecycle] -// -// [DeleteBucketLifecycle] -// -// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { - if params == nil { - params = &GetBucketLifecycleConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketLifecycleConfiguration", params, optFns, c.addOperationGetBucketLifecycleConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketLifecycleConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketLifecycleConfigurationInput struct { - - // The name of the bucket for which to get the lifecycle information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketLifecycleConfigurationOutput struct { - - // Container for a lifecycle rule. - Rules []types.LifecycleRule - - // Indicates which default minimum object size behavior is applied to the - // lifecycle configuration. - // - // This parameter applies to general purpose buckets only. It isn't supported for - // directory bucket lifecycle configurations. - // - // - all_storage_classes_128K - Objects smaller than 128 KB will not transition - // to any storage class by default. - // - // - varies_by_storage_class - Objects smaller than 128 KB will transition to - // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, - // all other storage classes will prevent transitions smaller than 128 KB. - // - // To customize the minimum object size for any transition you can add a filter - // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body - // of your transition rule. Custom filters always take precedence over the default - // transition behavior. - TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLifecycleConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketLifecycleConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketLifecycleConfiguration", - } -} - -// getGetBucketLifecycleConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketLifecycleConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketLifecycleConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go deleted file mode 100644 index 10b781392473..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go +++ /dev/null @@ -1,364 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -// This operation is not supported for directory buckets. -// -// Returns the Region the bucket resides in. You set the bucket's Region using the -// LocationConstraint request parameter in a CreateBucket request. For more -// information, see [CreateBucket]. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For -// backward compatibility, Amazon S3 continues to support GetBucketLocation. -// -// The following operations are related to GetBucketLocation : -// -// [GetObject] -// -// [CreateBucket] -// -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html -func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { - if params == nil { - params = &GetBucketLocationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketLocation", params, optFns, c.addOperationGetBucketLocationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketLocationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketLocationInput struct { - - // The name of the bucket for which to get the location. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketLocationOutput struct { - - // Specifies the Region where the bucket resides. For a list of all the Amazon S3 - // supported location constraints by Region, see [Regions and Endpoints]. - // - // Buckets in Region us-east-1 have a LocationConstraint of null . Buckets with a - // LocationConstraint of EU reside in eu-west-1 . - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - LocationConstraint types.BucketLocationConstraint - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLocation{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLocation"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = swapDeserializerHelper(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLocation(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketLocationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -type awsRestxml_deserializeOpGetBucketLocation_custom struct { -} - -func (*awsRestxml_deserializeOpGetBucketLocation_custom) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLocation_custom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) - } - output := &GetBucketLocationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - decoder := smithyxml.WrapNodeDecoder(rootDecoder, xml.StartElement{}) - err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) - if err == io.EOF { - err = nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -// Helper to swap in a custom deserializer -func swapDeserializerHelper(stack *middleware.Stack) error { - _, err := stack.Deserialize.Swap("OperationDeserializer", &awsRestxml_deserializeOpGetBucketLocation_custom{}) - if err != nil { - return err - } - return nil -} - -func (v *GetBucketLocationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketLocation", - } -} - -// getGetBucketLocationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketLocationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketLocationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketLocationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketLocationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go deleted file mode 100644 index 45142425b9aa..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go +++ /dev/null @@ -1,282 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation is not supported for directory buckets. -// -// Returns the logging status of a bucket and the permissions users have to view -// and modify that status. -// -// The following operations are related to GetBucketLogging : -// -// [CreateBucket] -// -// [PutBucketLogging] -// -// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { - if params == nil { - params = &GetBucketLoggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketLogging", params, optFns, c.addOperationGetBucketLoggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketLoggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketLoggingInput struct { - - // The bucket name for which to get the logging information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketLoggingOutput struct { - - // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API - // Reference. - // - // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html - LoggingEnabled *types.LoggingEnabled - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLogging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLogging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLogging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketLoggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketLogging", - } -} - -// getGetBucketLoggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketLoggingBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketLoggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketLoggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataConfiguration.go deleted file mode 100644 index 8f6b2609d8ea..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataConfiguration.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves the S3 Metadata configuration for a general purpose bucket. For more -// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// You can use the V2 GetBucketMetadataConfiguration API operation with V1 or V2 -// metadata configurations. However, if you try to use the V1 -// GetBucketMetadataTableConfiguration API operation with V2 configurations, you -// will receive an HTTP 405 Method Not Allowed error. -// -// Permissions To use this operation, you must have the -// s3:GetBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] -// in the Amazon S3 User Guide. -// -// The IAM policy action name is the same for the V1 and V2 API operations. -// -// The following operations are related to GetBucketMetadataConfiguration : -// -// [CreateBucketMetadataConfiguration] -// -// [DeleteBucketMetadataConfiguration] -// -// [UpdateBucketMetadataInventoryTableConfiguration] -// -// [UpdateBucketMetadataJournalTableConfiguration] -// -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [UpdateBucketMetadataJournalTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataJournalTableConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -// [UpdateBucketMetadataInventoryTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataInventoryTableConfiguration.html -// [DeleteBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataConfiguration.html -func (c *Client) GetBucketMetadataConfiguration(ctx context.Context, params *GetBucketMetadataConfigurationInput, optFns ...func(*Options)) (*GetBucketMetadataConfigurationOutput, error) { - if params == nil { - params = &GetBucketMetadataConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketMetadataConfiguration", params, optFns, c.addOperationGetBucketMetadataConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketMetadataConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketMetadataConfigurationInput struct { - - // The general purpose bucket that corresponds to the metadata configuration that - // you want to retrieve. - // - // This member is required. - Bucket *string - - // The expected owner of the general purpose bucket that you want to retrieve the - // metadata table configuration for. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketMetadataConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketMetadataConfigurationOutput struct { - - // The metadata configuration for the general purpose bucket. - GetBucketMetadataConfigurationResult *types.GetBucketMetadataConfigurationResult - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketMetadataConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetadataConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetadataConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetadataConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketMetadataConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetadataConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketMetadataConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketMetadataConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketMetadataConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketMetadataConfiguration", - } -} - -// getGetBucketMetadataConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketMetadataConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketMetadataConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketMetadataConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketMetadataConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go deleted file mode 100644 index eccbb6bee039..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go +++ /dev/null @@ -1,292 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// We recommend that you retrieve your S3 Metadata configurations by using the V2 [GetBucketMetadataTableConfiguration] -// -// API operation. We no longer recommend using the V1 -// GetBucketMetadataTableConfiguration API operation. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// Retrieves the V1 S3 Metadata configuration for a general purpose bucket. For -// more information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// You can use the V2 GetBucketMetadataConfiguration API operation with V1 or V2 -// metadata table configurations. However, if you try to use the V1 -// GetBucketMetadataTableConfiguration API operation with V2 configurations, you -// will receive an HTTP 405 Method Not Allowed error. -// -// Make sure that you update your processes to use the new V2 API operations ( -// CreateBucketMetadataConfiguration , GetBucketMetadataConfiguration , and -// DeleteBucketMetadataConfiguration ) instead of the V1 API operations. -// -// Permissions To use this operation, you must have the -// s3:GetBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] -// in the Amazon S3 User Guide. -// -// The following operations are related to GetBucketMetadataTableConfiguration : -// -// [CreateBucketMetadataTableConfiguration] -// -// [DeleteBucketMetadataTableConfiguration] -// -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html -// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// -// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html -func (c *Client) GetBucketMetadataTableConfiguration(ctx context.Context, params *GetBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*GetBucketMetadataTableConfigurationOutput, error) { - if params == nil { - params = &GetBucketMetadataTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketMetadataTableConfiguration", params, optFns, c.addOperationGetBucketMetadataTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketMetadataTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketMetadataTableConfigurationInput struct { - - // The general purpose bucket that corresponds to the metadata table - // configuration that you want to retrieve. - // - // This member is required. - Bucket *string - - // The expected owner of the general purpose bucket that you want to retrieve the - // metadata table configuration for. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketMetadataTableConfigurationOutput struct { - - // The metadata table configuration for the general purpose bucket. - GetBucketMetadataTableConfigurationResult *types.GetBucketMetadataTableConfigurationResult - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetadataTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketMetadataTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketMetadataTableConfiguration", - } -} - -// getGetBucketMetadataTableConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketMetadataTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketMetadataTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go deleted file mode 100644 index 54a3027d206a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go +++ /dev/null @@ -1,288 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets a metrics configuration (specified by the metrics configuration ID) from -// the bucket. Note that this doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the -// s3:GetMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. -// -// The following operations are related to GetBucketMetricsConfiguration : -// -// [PutBucketMetricsConfiguration] -// -// [DeleteBucketMetricsConfiguration] -// -// [ListBucketMetricsConfigurations] -// -// [Monitoring Metrics with Amazon CloudWatch] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { - if params == nil { - params = &GetBucketMetricsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketMetricsConfiguration", params, optFns, c.addOperationGetBucketMetricsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketMetricsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketMetricsConfigurationInput struct { - - // The name of the bucket containing the metrics configuration to retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketMetricsConfigurationOutput struct { - - // Specifies the metrics configuration. - MetricsConfiguration *types.MetricsConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetricsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketMetricsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketMetricsConfiguration", - } -} - -// getGetBucketMetricsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketMetricsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketMetricsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go deleted file mode 100644 index d2de8bc5fa5e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go +++ /dev/null @@ -1,311 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the notification configuration of a bucket. -// -// If notifications are not enabled on the bucket, the action returns an empty -// NotificationConfiguration element. -// -// By default, you must be the bucket owner to read the notification configuration -// of a bucket. However, the bucket owner can use a bucket policy to grant -// permission to other users to read this configuration with the -// s3:GetBucketNotification permission. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// For more information about setting and reading the notification configuration -// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies]. -// -// The following action is related to GetBucketNotification : -// -// [PutBucketNotification] -// -// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html -func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { - if params == nil { - params = &GetBucketNotificationConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketNotificationConfiguration", params, optFns, c.addOperationGetBucketNotificationConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketNotificationConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketNotificationConfigurationInput struct { - - // The name of the bucket for which to get the notification configuration. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -// A container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off for the bucket. -type GetBucketNotificationConfigurationOutput struct { - - // Enables delivery of events to Amazon EventBridge. - EventBridgeConfiguration *types.EventBridgeConfiguration - - // Describes the Lambda functions to invoke and the events for which to invoke - // them. - LambdaFunctionConfigurations []types.LambdaFunctionConfiguration - - // The Amazon Simple Queue Service queues to publish messages to and the events - // for which to publish messages. - QueueConfigurations []types.QueueConfiguration - - // The topic to which notifications are sent and the events for which - // notifications are generated. - TopicConfigurations []types.TopicConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketNotificationConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketNotificationConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketNotificationConfiguration", - } -} - -// getGetBucketNotificationConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketNotificationConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketNotificationConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go deleted file mode 100644 index 0c492f5eb0b1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go +++ /dev/null @@ -1,281 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:GetBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see [Specifying permissions in a policy]. -// -// A bucket doesn't have OwnershipControls settings in the following cases: -// -// - The bucket was created before the BucketOwnerEnforced ownership setting was -// introduced and you've never explicitly applied this value -// -// - You've manually deleted the bucket ownership control value using the -// DeleteBucketOwnershipControls API operation. -// -// By default, Amazon S3 sets OwnershipControls for all newly created buckets. -// -// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. -// -// The following operations are related to GetBucketOwnershipControls : -// -// # PutBucketOwnershipControls -// -// # DeleteBucketOwnershipControls -// -// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html -func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { - if params == nil { - params = &GetBucketOwnershipControlsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketOwnershipControls", params, optFns, c.addOperationGetBucketOwnershipControlsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketOwnershipControlsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketOwnershipControlsInput struct { - - // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketOwnershipControlsOutput struct { - - // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or - // ObjectWriter) currently in effect for this Amazon S3 bucket. - OwnershipControls *types.OwnershipControls - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketOwnershipControls"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketOwnershipControls(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketOwnershipControlsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketOwnershipControls", - } -} - -// getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketOwnershipControlsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketOwnershipControlsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go deleted file mode 100644 index 6cc341d7d818..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the policy of a specified bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions If you are using an identity other than the root user of the Amazon -// Web Services account that owns the bucket, the calling identity must both have -// the GetBucketPolicy permissions on the specified bucket and belong to the -// bucket owner's account in order to use this operation. -// -// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. -// -// To ensure that bucket owners don't inadvertently lock themselves out of their -// own buckets, the root principal in a bucket owner's Amazon Web Services account -// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API -// actions, even if their bucket policy explicitly denies the root principal's -// access. Bucket owner root principals can only be blocked from performing these -// API actions by VPC endpoint policies and Amazon Web Services Organizations -// policies. -// -// - General purpose bucket permissions - The s3:GetBucketPolicy permission is -// required in a policy. For more information about general purpose buckets bucket -// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:GetBucketPolicy permission in an IAM identity-based -// policy instead of a bucket policy. Cross-account access to this API operation -// isn't supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] -// in the Amazon S3 User Guide. -// -// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following action is related to GetBucketPolicy : -// -// [GetObject] -// -// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { - if params == nil { - params = &GetBucketPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, c.addOperationGetBucketPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketPolicyInput struct { - - // The bucket name to get the bucket policy for. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // Access points - When you use this API operation with an access point, provide - // the alias of the access point in place of the bucket name. - // - // Object Lambda access points - When you use this API operation with an Object - // Lambda access point, provide the alias of the Object Lambda access point in - // place of the bucket name. If the Object Lambda access point alias in a request - // is not valid, the error code InvalidAccessPointAliasError is returned. For more - // information about InvalidAccessPointAliasError , see [List of Error Codes]. - // - // Object Lambda access points are not supported by directory buckets. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketPolicyOutput struct { - - // The bucket policy as a JSON document. - Policy *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketPolicyInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketPolicy", - } -} - -// getGetBucketPolicyBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketPolicyBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketPolicyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketPolicyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go deleted file mode 100644 index 76a10e132b49..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go +++ /dev/null @@ -1,279 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. In order to use this operation, you must have the -// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 -// permissions, see [Specifying Permissions in a Policy]. -// -// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"]. -// -// The following operations are related to GetBucketPolicyStatus : -// -// [Using Amazon S3 Block Public Access] -// -// [GetPublicAccessBlock] -// -// [PutPublicAccessBlock] -// -// [DeletePublicAccessBlock] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { - if params == nil { - params = &GetBucketPolicyStatusInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicyStatus", params, optFns, c.addOperationGetBucketPolicyStatusMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketPolicyStatusOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketPolicyStatusInput struct { - - // The name of the Amazon S3 bucket whose policy status you want to retrieve. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketPolicyStatusOutput struct { - - // The policy status for the specified bucket. - PolicyStatus *types.PolicyStatus - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicyStatus{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicyStatus"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicyStatus(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketPolicyStatusInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketPolicyStatus", - } -} - -// getGetBucketPolicyStatusBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketPolicyStatusBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketPolicyStatusInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketPolicyStatusUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketPolicyStatusBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go deleted file mode 100644 index dec0e0a6570f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go +++ /dev/null @@ -1,286 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the replication configuration of a bucket. -// -// It can take a while to propagate the put or delete a replication configuration -// to all Amazon S3 systems. Therefore, a get request soon after put or delete can -// return a wrong result. -// -// For information about replication configuration, see [Replication] in the Amazon S3 User -// Guide. -// -// This action requires permissions for the s3:GetReplicationConfiguration action. -// For more information about permissions, see [Using Bucket Policies and User Policies]. -// -// If you include the Filter element in a replication configuration, you must also -// include the DeleteMarkerReplication and Priority elements. The response also -// returns those elements. -// -// For information about GetBucketReplication errors, see [List of replication-related error codes] -// -// The following operations are related to GetBucketReplication : -// -// [PutBucketReplication] -// -// [DeleteBucketReplication] -// -// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html -// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList -// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html -func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { - if params == nil { - params = &GetBucketReplicationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketReplication", params, optFns, c.addOperationGetBucketReplicationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketReplicationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketReplicationInput struct { - - // The bucket name for which to get the replication information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketReplicationOutput struct { - - // A container for replication rules. You can add up to 1,000 rules. The maximum - // size of a replication configuration is 2 MB. - ReplicationConfiguration *types.ReplicationConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketReplication{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketReplication"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketReplication(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketReplicationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketReplication", - } -} - -// getGetBucketReplicationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketReplicationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketReplicationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketReplicationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go deleted file mode 100644 index 81e855209a41..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go +++ /dev/null @@ -1,265 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the request payment configuration of a bucket. To use this version of -// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets]. -// -// The following operations are related to GetBucketRequestPayment : -// -// [ListObjects] -// -// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html -// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html -func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { - if params == nil { - params = &GetBucketRequestPaymentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketRequestPayment", params, optFns, c.addOperationGetBucketRequestPaymentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketRequestPaymentOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketRequestPaymentInput struct { - - // The name of the bucket for which to get the payment request configuration - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketRequestPaymentOutput struct { - - // Specifies who pays for the download and request fees. - Payer types.Payer - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketRequestPayment"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketRequestPayment(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketRequestPaymentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketRequestPayment", - } -} - -// getGetBucketRequestPaymentBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketRequestPaymentBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketRequestPaymentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketRequestPaymentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go deleted file mode 100644 index 29a440048cd6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go +++ /dev/null @@ -1,278 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the tag set associated with the bucket. -// -// To use this operation, you must have permission to perform the -// s3:GetBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. -// -// GetBucketTagging has the following special error: -// -// - Error code: NoSuchTagSet -// -// - Description: There is no tag set associated with the bucket. -// -// The following operations are related to GetBucketTagging : -// -// [PutBucketTagging] -// -// [DeleteBucketTagging] -// -// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html -// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html -func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { - if params == nil { - params = &GetBucketTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketTagging", params, optFns, c.addOperationGetBucketTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketTaggingInput struct { - - // The name of the bucket for which to get the tagging information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketTaggingOutput struct { - - // Contains the tag set. - // - // This member is required. - TagSet []types.Tag - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketTagging", - } -} - -// getGetBucketTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go deleted file mode 100644 index 7f91571619e6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go +++ /dev/null @@ -1,280 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the versioning state of a bucket. -// -// To retrieve the versioning state of a bucket, you must be the bucket owner. -// -// This implementation also returns the MFA Delete status of the versioning state. -// If the MFA Delete status is enabled , the bucket owner must use an -// authentication device to change the versioning state of the bucket. -// -// The following operations are related to GetBucketVersioning : -// -// [GetObject] -// -// [PutObject] -// -// [DeleteObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { - if params == nil { - params = &GetBucketVersioningInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketVersioning", params, optFns, c.addOperationGetBucketVersioningMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketVersioningOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketVersioningInput struct { - - // The name of the bucket for which to get the versioning information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketVersioningOutput struct { - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA delete. - // If the bucket has never been so configured, this element is not returned. - MFADelete types.MFADeleteStatus - - // The versioning state of the bucket. - Status types.BucketVersioningStatus - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketVersioning{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketVersioning"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketVersioning(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketVersioningInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketVersioning", - } -} - -// getGetBucketVersioningBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketVersioningBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketVersioningInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketVersioningBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go deleted file mode 100644 index a0601bc0d7d5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the website configuration for a bucket. To host website on Amazon S3, -// you can configure a bucket as website by adding a website configuration. For -// more information about hosting websites, see [Hosting Websites on Amazon S3]. -// -// This GET action requires the S3:GetBucketWebsite permission. By default, only -// the bucket owner can read the bucket website configuration. However, bucket -// owners can allow other users to read the website configuration by writing a -// bucket policy granting them the S3:GetBucketWebsite permission. -// -// The following operations are related to GetBucketWebsite : -// -// [DeleteBucketWebsite] -// -// [PutBucketWebsite] -// -// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html -// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html -// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html -func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { - if params == nil { - params = &GetBucketWebsiteInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketWebsite", params, optFns, c.addOperationGetBucketWebsiteMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketWebsiteOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketWebsiteInput struct { - - // The bucket name for which to get the website configuration. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketWebsiteOutput struct { - - // The object key name of the website error document to use for 4XX class errors. - ErrorDocument *types.ErrorDocument - - // The name of the index document for the website (for example index.html ). - IndexDocument *types.IndexDocument - - // Specifies the redirect behavior of all requests to a website endpoint of an - // Amazon S3 bucket. - RedirectAllRequestsTo *types.RedirectAllRequestsTo - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []types.RoutingRule - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketWebsite{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketWebsite"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketWebsite(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketWebsiteInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketWebsite", - } -} - -// getGetBucketWebsiteBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketWebsiteBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketWebsiteInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketWebsiteBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go deleted file mode 100644 index 6d18100eda40..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ /dev/null @@ -1,924 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "time" -) - -// Retrieves an object from Amazon S3. -// -// In the GetObject request, specify the full key name for the object. -// -// General purpose buckets - Both the virtual-hosted-style requests and the -// path-style requests are supported. For a virtual hosted-style request example, -// if you have the object photos/2006/February/sample.jpg , specify the object key -// name as /photos/2006/February/sample.jpg . For a path-style request example, if -// you have the object photos/2006/February/sample.jpg in the bucket named -// examplebucket , specify the object key name as -// /examplebucket/photos/2006/February/sample.jpg . For more information about -// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide. -// -// Directory buckets - Only virtual-hosted-style requests are supported. For a -// virtual hosted-style request example, if you have the object -// photos/2006/February/sample.jpg in the bucket named -// amzn-s3-demo-bucket--usw2-az1--x-s3 , specify the object key name as -// /photos/2006/February/sample.jpg . Also, when you make requests to this API -// operation, your requests are sent to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - You must have the required permissions -// in a policy. To use GetObject , you must have the READ access to the object -// (or version). If you grant READ access to the anonymous user, the GetObject -// operation returns the object without using an authorization header. For more -// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide. -// -// If you include a versionId in your request header, you must have the -// -// s3:GetObjectVersion permission to access a specific version of an object. The -// s3:GetObject permission is not required in this scenario. -// -// If you request the current version of an object without a specific versionId in -// -// the request header, only the s3:GetObject permission is required. The -// s3:GetObjectVersion permission is not required in this scenario. -// -// If the object that you request doesn’t exist, the error that Amazon S3 returns -// -// depends on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Access Denied error. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted using SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Storage classes If the object you are retrieving is stored in the S3 Glacier -// Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the -// S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep -// Archive Access tier, before you can retrieve the object you must first restore a -// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. -// -// Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 -// Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 -// One Zone-Infrequent Access storage class) in Dedicated Local Zones. Unsupported -// storage class values won't write a destination object and will respond with the -// HTTP status code 400 Bad Request . -// -// Encryption Encryption request headers, like x-amz-server-side-encryption , -// should not be sent for the GetObject requests, if your object uses server-side -// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side -// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer -// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you -// include the header in your GetObject requests for the object that uses these -// types of keys, you’ll get an HTTP 400 Bad Request error. -// -// Directory buckets - For directory buckets, there are only two supported options -// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more -// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. -// -// Overriding response header values through the request There are times when you -// want to override certain response header values of a GetObject response. For -// example, you might override the Content-Disposition response header value -// through your GetObject request. -// -// You can override values for a set of response headers. These modified response -// header values are included only in a successful response, that is, when the HTTP -// status code 200 OK is returned. The headers you can override using the -// following query parameters in the request are a subset of the headers that -// Amazon S3 accepts when you create an object. -// -// The response headers that you can override for the GetObject response are -// Cache-Control , Content-Disposition , Content-Encoding , Content-Language , -// Content-Type , and Expires . -// -// To override values for a set of response headers in the GetObject response, you -// can use the following query parameters in the request. -// -// - response-cache-control -// -// - response-content-disposition -// -// - response-content-encoding -// -// - response-content-language -// -// - response-content-type -// -// - response-expires -// -// When you use these parameters, you must sign the request by using either an -// Authorization header or a presigned URL. These parameters cannot be used with an -// unsigned (anonymous) request. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to GetObject : -// -// [ListBuckets] -// -// [GetObjectAcl] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html -// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket -// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html -// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html -// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { - if params == nil { - params = &GetObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObject", params, optFns, c.addOperationGetObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectInput struct { - - // The bucket name containing the object. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points - When you use this action with an Object Lambda - // access point, you must direct requests to the Object Lambda access point - // hostname. The Object Lambda access point hostname takes the form - // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key of the object to get. - // - // This member is required. - Key *string - - // To retrieve the checksum, this mode must be enabled. - ChecksumMode types.ChecksumMode - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Return the object only if its entity tag (ETag) is the same as the one - // specified in this header; otherwise, return a 412 Precondition Failed error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: If-Match condition evaluates to true , and; - // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and - // the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 Not Modified error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: If-None-Match condition evaluates to false , and; - // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not - // Modified status code. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfModifiedSince *time.Time - - // Return the object only if its entity tag (ETag) is different from the one - // specified in this header; otherwise, return a 304 Not Modified error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: If-None-Match condition evaluates to false , and; - // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not - // Modified HTTP status code. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 Precondition Failed error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: If-Match condition evaluates to true , and; - // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and - // the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfUnmodifiedSince *time.Time - - // Part number of the object being read. This is a positive integer between 1 and - // 10,000. Effectively performs a 'ranged' GET request for the part specified. - // Useful for downloading just a part of an object. - PartNumber *int32 - - // Downloads the specified byte range of an object. For more information about the - // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]. - // - // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range - Range *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Sets the Cache-Control header of the response. - ResponseCacheControl *string - - // Sets the Content-Disposition header of the response. - ResponseContentDisposition *string - - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string - - // Sets the Content-Language header of the response. - ResponseContentLanguage *string - - // Sets the Content-Type header of the response. - ResponseContentType *string - - // Sets the Expires header of the response. - ResponseExpires *time.Time - - // Specifies the algorithm to use when decrypting the object (for example, AES256 ). - // - // If you encrypt an object by using server-side encryption with customer-provided - // encryption keys (SSE-C) when you store the object in Amazon S3, then when you - // GET the object, you must use the following headers: - // - // - x-amz-server-side-encryption-customer-algorithm - // - // - x-amz-server-side-encryption-customer-key - // - // - x-amz-server-side-encryption-customer-key-MD5 - // - // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key that you originally provided for - // Amazon S3 to encrypt the data before storing it. This value is used to decrypt - // the object when recovering it and must match the one used when storing the data. - // The key must be appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // If you encrypt an object by using server-side encryption with customer-provided - // encryption keys (SSE-C) when you store the object in Amazon S3, then when you - // GET the object, you must use the following headers: - // - // - x-amz-server-side-encryption-customer-algorithm - // - // - x-amz-server-side-encryption-customer-key - // - // - x-amz-server-side-encryption-customer-key-MD5 - // - // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the customer-provided encryption key - // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. - // - // If you encrypt an object by using server-side encryption with customer-provided - // encryption keys (SSE-C) when you store the object in Amazon S3, then when you - // GET the object, you must use the following headers: - // - // - x-amz-server-side-encryption-customer-algorithm - // - // - x-amz-server-side-encryption-customer-key - // - // - x-amz-server-side-encryption-customer-key-MD5 - // - // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - // Version ID used to reference a specific version of the object. - // - // By default, the GetObject operation returns the current version of an object. - // To return a different version, use the versionId subresource. - // - // - If you include a versionId in your request header, you must have the - // s3:GetObjectVersion permission to access a specific version of an object. The - // s3:GetObject permission is not required in this scenario. - // - // - If you request the current version of an object without a specific versionId - // in the request header, only the s3:GetObject permission is required. The - // s3:GetObjectVersion permission is not required in this scenario. - // - // - Directory buckets - S3 Versioning isn't enabled and supported for directory - // buckets. For this API operation, only the null value of the version ID is - // supported by directory buckets. You can only specify null to the versionId - // query parameter in the request. - // - // For more information about versioning, see [PutBucketVersioning]. - // - // [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type GetObjectOutput struct { - - // Indicates that a range of bytes was specified in the request. - AcceptRanges *string - - // Object data. - Body io.ReadCloser - - // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more - // information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. You can use this header - // response to verify that the checksum type that is received is the same checksum - // type that was specified in the CreateMultipartUpload request. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Specifies presentational information for the object. - ContentDisposition *string - - // Indicates what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // Size of the body in bytes. - ContentLength *int64 - - // The portion of the object returned in the response. - ContentRange *string - - // A standard MIME type describing the format of the object data. - ContentType *string - - // Indicates whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - // - // - If the current version of the object is a delete marker, Amazon S3 behaves - // as if the object was deleted and includes x-amz-delete-marker: true in the - // response. - // - // - If the specified version in the request is a delete marker, the response - // returns a 405 Method Not Allowed error and the Last-Modified: timestamp - // response header. - DeleteMarker *bool - - // An entity tag (ETag) is an opaque identifier assigned by a web server to a - // specific version of a resource found at a URL. - ETag *string - - // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - // - // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html - Expiration *string - - // The date and time at which the object is no longer cacheable. - // - // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using - // the ExpiresString field which contains the unparsed value from the service - // response. - Expires *time.Time - - // The unparsed value of the Expires field from the service response. Prefer use - // of this value over the normal Expires response field where possible. - ExpiresString *string - - // Date and time when the object was last modified. - // - // General purpose buckets - When you specify a versionId of the object in your - // request, if the specified version in the request is a delete marker, the - // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp - // response header. - LastModified *time.Time - - // A map of metadata to store with the object in S3. - // - // Map keys will be normalized to lower-case. - Metadata map[string]string - - // This is set to the number of metadata entries not returned in the headers that - // are prefixed with x-amz-meta- . This can happen if you create metadata using an - // API like SOAP that supports more flexible metadata than the REST API. For - // example, using SOAP, you can create metadata whose values are not legal HTTP - // headers. - // - // This functionality is not supported for directory buckets. - MissingMeta *int32 - - // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. - // - // This functionality is not supported for directory buckets. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode that's currently in place for this object. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // The date and time when this object's Object Lock will expire. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // The count of parts this object has. This value is only returned if you specify - // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount *int32 - - // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. - // - // This functionality is not supported for directory buckets. - ReplicationStatus types.ReplicationStatus - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Provides information about object restoration action and expiration time of the - // restored object copy. - // - // This functionality is not supported for directory buckets. Directory buckets - // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in - // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage - // class) in Dedicated Local Zones. - Restore *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. - // - // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 - // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 - // One Zone-Infrequent Access storage class) in Dedicated Local Zones. - StorageClass types.StorageClass - - // The number of tags, if any, on the object, when you have the relevant - // permission to read object tags. - // - // You can use [GetObjectTagging] to retrieve the tag set associated with an object. - // - // This functionality is not supported for directory buckets. - // - // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html - TagCount *int32 - - // Version ID of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. - // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addResponseChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addGetObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObject", - } -} - -// getGetObjectRequestValidationModeMember gets the request checksum validation -// mode provided as input. -func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) { - in := input.(*GetObjectInput) - if len(in.ChecksumMode) == 0 { - return "", false - } - return string(in.ChecksumMode), true -} - -func setGetObjectRequestValidationModeMember(input interface{}, mode string) { - in := input.(*GetObjectInput) - in.ChecksumMode = types.ChecksumMode(mode) -} - -func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{ - GetValidationMode: getGetObjectRequestValidationModeMember, - SetValidationMode: setGetObjectRequestValidationModeMember, - ResponseChecksumValidation: options.ResponseChecksumValidation, - ValidationAlgorithms: []string{"CRC64NVME", "CRC32", "CRC32C", "SHA256", "SHA1"}, - IgnoreMultipartValidation: true, - LogValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, - LogMultipartValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, - }) -} - -// getGetObjectBucketMember returns a pointer to string denoting a provided bucket -// member valueand a boolean indicating if the input has a modeled bucket name, -func getGetObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignGetObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignGetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &GetObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "GetObject", params, clientOptFns, - c.client.addOperationGetObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addGetObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addGetObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go deleted file mode 100644 index ec7554529bb6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go +++ /dev/null @@ -1,334 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the access control list (ACL) of an object. To use this operation, you -// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For -// more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// By default, GET returns ACL information about the current version of an object. -// To return ACL information about a different version, use the versionId -// subresource. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// requests to read ACLs are still supported and return the -// bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. -// -// The following operations are related to GetObjectAcl : -// -// [GetObject] -// -// [GetObjectAttributes] -// -// [DeleteObject] -// -// [PutObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { - if params == nil { - params = &GetObjectAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectAcl", params, optFns, c.addOperationGetObjectAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectAclInput struct { - - // The bucket name that contains the object for which to get the ACL information. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key of the object for which to get the ACL information. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Version ID used to reference a specific version of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type GetObjectAclOutput struct { - - // A list of grants. - Grants []types.Grant - - // Container for the bucket owner's display name and ID. - Owner *types.Owner - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectAcl", - } -} - -// getGetObjectAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetObjectAclBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go deleted file mode 100644 index d4c0f41c6470..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ /dev/null @@ -1,560 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Retrieves all of the metadata from an object without returning the object -// itself. This operation is useful if you're interested only in an object's -// metadata. -// -// GetObjectAttributes combines the functionality of HeadObject and ListParts . All -// of the data returned with both of those individual calls can be returned with a -// single call to GetObjectAttributes . -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - To use GetObjectAttributes , you must -// have READ access to the object. -// -// The other permissions that you need to use this operation depend on whether the -// -// bucket is versioned and if a version ID is passed in the GetObjectAttributes -// request. -// -// - If you pass a version ID in your request, you need both the -// s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions. -// -// - If you do not pass a version ID in your request, you need the s3:GetObject -// and s3:GetObjectAttributes permissions. -// -// For more information, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. -// -// If the object that you request does not exist, the error Amazon S3 returns -// -// depends on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found ("no such key") error. -// -// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden ("access denied") error. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Encryption Encryption request headers, like x-amz-server-side-encryption , -// should not be sent for HEAD requests if your object uses server-side encryption -// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side -// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side -// encryption with Amazon S3 managed encryption keys (SSE-S3). The -// x-amz-server-side-encryption header is used when you PUT an object to S3 and -// want to specify the encryption method. If you include this header in a GET -// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad -// Request error. It's because the encryption method can't be changed when you -// retrieve the object. -// -// If you encrypted an object when you stored the object in Amazon S3 by using -// server-side encryption with customer-provided encryption keys (SSE-C), then when -// you retrieve the metadata from the object, you must use the following headers. -// These headers provide the server with the encryption key required to retrieve -// the object's metadata. The headers are: -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. -// -// Directory bucket permissions - For directory buckets, there are only two -// supported options for server-side encryption: server-side encryption with Amazon -// S3 managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses -// the desired encryption configuration and you don't override the bucket default -// encryption in your CreateSession requests or PUT object requests. Then, new -// objects are automatically encrypted with the desired encryption settings. For -// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about -// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. -// -// Versioning Directory buckets - S3 Versioning isn't enabled and supported for -// directory buckets. For this API operation, only the null value of the version -// ID is supported by directory buckets. You can only specify null to the versionId -// query parameter in the request. -// -// Conditional request headers Consider the following when using request headers: -// -// - If both of the If-Match and If-Unmodified-Since headers are present in the -// request as follows, then Amazon S3 returns the HTTP status code 200 OK and the -// data requested: -// -// - If-Match condition evaluates to true . -// -// - If-Unmodified-Since condition evaluates to false . -// -// For more information about conditional requests, see [RFC 7232]. -// -// - If both of the If-None-Match and If-Modified-Since headers are present in -// the request as follows, then Amazon S3 returns the HTTP status code 304 Not -// Modified : -// -// - If-None-Match condition evaluates to false . -// -// - If-Modified-Since condition evaluates to true . -// -// For more information about conditional requests, see [RFC 7232]. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following actions are related to GetObjectAttributes : -// -// [GetObject] -// -// [GetObjectAcl] -// -// [GetObjectLegalHold] -// -// [GetObjectLockConfiguration] -// -// [GetObjectRetention] -// -// [GetObjectTagging] -// -// [HeadObject] -// -// [ListParts] -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [RFC 7232]: https://tools.ietf.org/html/rfc7232 -// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html -// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html -// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { - if params == nil { - params = &GetObjectAttributesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectAttributes", params, optFns, c.addOperationGetObjectAttributesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectAttributesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectAttributesInput struct { - - // The name of the bucket that contains the object. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The object key. - // - // This member is required. - Key *string - - // Specifies the fields at the root level that you want returned in the response. - // Fields that you do not specify are not returned. - // - // This member is required. - ObjectAttributes []types.ObjectAttributes - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Sets the maximum number of parts to return. For more information, see [Uploading and copying objects using multipart upload in Amazon S3] in the - // Amazon Simple Storage Service user guide. - // - // [Uploading and copying objects using multipart upload in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html - MaxParts *int32 - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. For more information, see [Uploading and copying objects using multipart upload in Amazon S3]in the Amazon Simple - // Storage Service user guide. - // - // [Uploading and copying objects using multipart upload in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html - PartNumberMarker *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // The version ID used to reference a specific version of the object. - // - // S3 Versioning isn't enabled and supported for directory buckets. For this API - // operation, only the null value of the version ID is supported by directory - // buckets. You can only specify null to the versionId query parameter in the - // request. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectAttributesOutput struct { - - // The checksum or digest of the object. - Checksum *types.Checksum - - // Specifies whether the object retrieved was ( true ) or was not ( false ) a - // delete marker. If false , this response header does not appear in the response. - // To learn more about delete markers, see [Working with delete markers]. - // - // This functionality is not supported for directory buckets. - // - // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html - DeleteMarker *bool - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL. - ETag *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // A collection of parts associated with a multipart upload. - ObjectParts *types.GetObjectAttributesParts - - // The size of the object in bytes. - ObjectSize *int64 - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Provides the storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. - // - // For more information, see [Storage Classes]. - // - // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 - // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 - // One Zone-Infrequent Access storage class) in Dedicated Local Zones. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The version ID of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAttributes{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAttributes"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAttributes(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectAttributesInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectAttributes", - } -} - -// getGetObjectAttributesBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetObjectAttributesBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectAttributesInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectAttributesUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectAttributesBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go deleted file mode 100644 index 2ceb969b0597..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ /dev/null @@ -1,299 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets an object's current legal hold status. For more information, see [Locking Objects]. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// The following action is related to GetObjectLegalHold : -// -// [GetObjectAttributes] -// -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { - if params == nil { - params = &GetObjectLegalHoldInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectLegalHold", params, optFns, c.addOperationGetObjectLegalHoldMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectLegalHoldOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectLegalHoldInput struct { - - // The bucket name containing the object whose legal hold status you want to - // retrieve. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object whose legal hold status you want to retrieve. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The version ID of the object whose legal hold status you want to retrieve. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectLegalHoldOutput struct { - - // The current legal hold status for the specified object. - LegalHold *types.ObjectLockLegalHold - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLegalHold"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLegalHold(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectLegalHoldInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectLegalHold", - } -} - -// getGetObjectLegalHoldBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetObjectLegalHoldBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectLegalHoldInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectLegalHoldBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go deleted file mode 100644 index 401fe9c23560..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go +++ /dev/null @@ -1,278 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets the Object Lock configuration for a bucket. The rule specified in the -// Object Lock configuration will be applied by default to every new object placed -// in the specified bucket. For more information, see [Locking Objects]. -// -// The following action is related to GetObjectLockConfiguration : -// -// [GetObjectAttributes] -// -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { - if params == nil { - params = &GetObjectLockConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectLockConfiguration", params, optFns, c.addOperationGetObjectLockConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectLockConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectLockConfigurationInput struct { - - // The bucket whose Object Lock configuration you want to retrieve. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectLockConfigurationOutput struct { - - // The specified bucket's Object Lock configuration. - ObjectLockConfiguration *types.ObjectLockConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLockConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLockConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectLockConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectLockConfiguration", - } -} - -// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getGetObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectLockConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectLockConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go deleted file mode 100644 index 58289b515afe..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ /dev/null @@ -1,299 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves an object's retention settings. For more information, see [Locking Objects]. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// The following action is related to GetObjectRetention : -// -// [GetObjectAttributes] -// -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { - if params == nil { - params = &GetObjectRetentionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectRetention", params, optFns, c.addOperationGetObjectRetentionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectRetentionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectRetentionInput struct { - - // The bucket name containing the object whose retention settings you want to - // retrieve. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object whose retention settings you want to retrieve. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The version ID for the object whose retention settings you want to retrieve. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectRetentionOutput struct { - - // The container element for an object's retention settings. - Retention *types.ObjectLockRetention - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectRetention{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectRetention"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectRetention(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectRetentionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectRetention", - } -} - -// getGetObjectRetentionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetObjectRetentionBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectRetentionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectRetentionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go deleted file mode 100644 index fac5d36b9e9d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ /dev/null @@ -1,328 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the tag-set of an object. You send the GET request against the tagging -// subresource associated with the object. -// -// To use this operation, you must have permission to perform the -// s3:GetObjectTagging action. By default, the GET action returns information about -// current version of an object. For a versioned bucket, you can have multiple -// versions of an object in your bucket. To retrieve tags of any other version, use -// the versionId query parameter. You also need permission for the -// s3:GetObjectVersionTagging action. -// -// By default, the bucket owner has this permission and can grant this permission -// to others. -// -// For information about the Amazon S3 object tagging feature, see [Object Tagging]. -// -// The following actions are related to GetObjectTagging : -// -// [DeleteObjectTagging] -// -// [GetObjectAttributes] -// -// [PutObjectTagging] -// -// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html -// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html -func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { - if params == nil { - params = &GetObjectTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectTagging", params, optFns, c.addOperationGetObjectTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectTaggingInput struct { - - // The bucket name containing the object for which to get the tagging information. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which to get the tagging information. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The versionId of the object for which to get the tagging information. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectTaggingOutput struct { - - // Contains the tag set. - // - // This member is required. - TagSet []types.Tag - - // The versionId of the object for which you got the tagging information. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectTagging", - } -} - -// getGetObjectTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetObjectTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go deleted file mode 100644 index 1fa6a28626d9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ /dev/null @@ -1,295 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -// This operation is not supported for directory buckets. -// -// Returns torrent files from a bucket. BitTorrent can save you bandwidth when -// you're distributing large files. -// -// You can get torrent only for objects that are less than 5 GB in size, and that -// are not encrypted using server-side encryption with a customer-provided -// encryption key. -// -// To use GET, you must have READ access to the object. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// The following action is related to GetObjectTorrent : -// -// [GetObject] -// -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { - if params == nil { - params = &GetObjectTorrentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectTorrent", params, optFns, c.addOperationGetObjectTorrentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectTorrentOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectTorrentInput struct { - - // The name of the bucket containing the object for which to get the torrent files. - // - // This member is required. - Bucket *string - - // The object key for which to get the information. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectTorrentOutput struct { - - // A Bencoded dictionary as defined by the BitTorrent specification - Body io.ReadCloser - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTorrent{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTorrent"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTorrent(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectTorrentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectTorrent(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectTorrent", - } -} - -// getGetObjectTorrentBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetObjectTorrentBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectTorrentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectTorrentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectTorrentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go deleted file mode 100644 index 417ca07a77e5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go +++ /dev/null @@ -1,288 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For -// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. -// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an -// object, it checks the PublicAccessBlock configuration for both the bucket (or -// the bucket that contains the object) and the bucket owner's account. If the -// PublicAccessBlock settings are different between the bucket and the account, -// Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see [The Meaning of "Public"]. -// -// The following operations are related to GetPublicAccessBlock : -// -// [Using Amazon S3 Block Public Access] -// -// [PutPublicAccessBlock] -// -// [GetPublicAccessBlock] -// -// [DeletePublicAccessBlock] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { - if params == nil { - params = &GetPublicAccessBlockInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetPublicAccessBlock", params, optFns, c.addOperationGetPublicAccessBlockMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetPublicAccessBlockOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetPublicAccessBlockInput struct { - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want - // to retrieve. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetPublicAccessBlockOutput struct { - - // The PublicAccessBlock configuration currently in effect for this Amazon S3 - // bucket. - PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetPublicAccessBlock"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicAccessBlock(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetPublicAccessBlockInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetPublicAccessBlock", - } -} - -// getGetPublicAccessBlockBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetPublicAccessBlockBucketMember(input interface{}) (*string, bool) { - in := input.(*GetPublicAccessBlockInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetPublicAccessBlockBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go deleted file mode 100644 index 80a8011b51ee..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go +++ /dev/null @@ -1,765 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "errors" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - smithywaiter "github.com/aws/smithy-go/waiter" - "time" -) - -// You can use this operation to determine if a bucket exists and if you have -// permission to access it. The action returns a 200 OK if the bucket exists and -// you have permission to access it. -// -// If the bucket does not exist or you do not have permission to access it, the -// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found -// code. A message body is not included, so you cannot determine the exception -// beyond these HTTP response codes. -// -// Authentication and authorization General purpose buckets - Request to public -// buckets that grant the s3:ListBucket permission publicly do not need to be -// signed. All other HeadBucket requests must be authenticated and signed by using -// IAM credentials (access key ID and secret access key for the IAM identities). -// All headers with the x-amz- prefix, including x-amz-copy-source , must be -// signed. For more information, see [REST Authentication]. -// -// Directory buckets - You must use IAM credentials to authenticate and authorize -// your access to the HeadBucket API operation, instead of using the temporary -// security credentials through the CreateSession API operation. -// -// Amazon Web Services CLI or SDKs handles authentication and authorization on -// your behalf. -// -// Permissions -// -// - General purpose bucket permissions - To use this operation, you must have -// permissions to perform the s3:ListBucket action. The bucket owner has this -// permission by default and can grant this permission to others. For more -// information about permissions, see [Managing access permissions to your Amazon S3 resources]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - You must have the s3express:CreateSession -// permission in the Action element of a policy. By default, the session is in -// the ReadWrite mode. If you want to restrict the access, you can explicitly set -// the s3express:SessionMode condition key to ReadOnly on the bucket. -// -// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 -// -// User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// You must make requests for this API operation to the Zonal endpoint. These -// endpoints support virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style -// requests are not supported. For more information about endpoints in Availability -// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints in -// Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { - if params == nil { - params = &HeadBucketInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "HeadBucket", params, optFns, c.addOperationHeadBucketMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*HeadBucketOutput) - out.ResultMetadata = metadata - return out, nil -} - -type HeadBucketInput struct { - - // The bucket name. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points - When you use this API operation with an Object - // Lambda access point, provide the alias of the Object Lambda access point in - // place of the bucket name. If the Object Lambda access point alias in a request - // is not valid, the error code InvalidAccessPointAliasError is returned. For more - // information about InvalidAccessPointAliasError , see [List of Error Codes]. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type HeadBucketOutput struct { - - // Indicates whether the bucket name used in the request is an access point alias. - // - // For directory buckets, the value of this field is false . - AccessPointAlias *bool - - // The Amazon Resource Name (ARN) of the S3 bucket. ARNs uniquely identify Amazon - // Web Services resources across all of Amazon Web Services. - // - // This parameter is only supported for S3 directory buckets. For more - // information, see [Using tags with directory buckets]. - // - // [Using tags with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-tagging.html - BucketArn *string - - // The name of the location where the bucket will be created. - // - // For directory buckets, the Zone ID of the Availability Zone or the Local Zone - // where the bucket is created. An example Zone ID value for an Availability Zone - // is usw2-az1 . - // - // This functionality is only supported by directory buckets. - BucketLocationName *string - - // The type of location where the bucket is created. - // - // This functionality is only supported by directory buckets. - BucketLocationType types.LocationType - - // The Region that the bucket is located. - BucketRegion *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadBucket{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "HeadBucket"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpHeadBucketValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadBucket(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// BucketExistsWaiterOptions are waiter options for BucketExistsWaiter -type BucketExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // BucketExistsWaiter will use default minimum delay of 5 seconds. Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note - // that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) -} - -// BucketExistsWaiter defines the waiters for BucketExists -type BucketExistsWaiter struct { - client HeadBucketAPIClient - - options BucketExistsWaiterOptions -} - -// NewBucketExistsWaiter constructs a BucketExistsWaiter. -func NewBucketExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketExistsWaiterOptions)) *BucketExistsWaiter { - options := BucketExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = bucketExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &BucketExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for BucketExists waiter. The maxWaitDur is the -// maximum wait duration the waiter will wait. The maxWaitDur is required and must -// be greater than zero. -func (w *BucketExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for BucketExists waiter and returns the -// output of the successful operation. The maxWaitDur is the maximum wait duration -// the waiter will wait. The maxWaitDur is required and must be greater than zero. -func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) (*HeadBucketOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadBucket(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for BucketExists waiter") -} - -func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { - - if err == nil { - return false, nil - } - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return true, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -// BucketNotExistsWaiterOptions are waiter options for BucketNotExistsWaiter -type BucketNotExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // BucketNotExistsWaiter will use default minimum delay of 5 seconds. Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. - // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) -} - -// BucketNotExistsWaiter defines the waiters for BucketNotExists -type BucketNotExistsWaiter struct { - client HeadBucketAPIClient - - options BucketNotExistsWaiterOptions -} - -// NewBucketNotExistsWaiter constructs a BucketNotExistsWaiter. -func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketNotExistsWaiterOptions)) *BucketNotExistsWaiter { - options := BucketNotExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = bucketNotExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &BucketNotExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is -// the maximum wait duration the waiter will wait. The maxWaitDur is required and -// must be greater than zero. -func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for BucketNotExists waiter and returns -// the output of the successful operation. The maxWaitDur is the maximum wait -// duration the waiter will wait. The maxWaitDur is required and must be greater -// than zero. -func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) (*HeadBucketOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadBucket(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for BucketNotExists waiter") -} - -func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return false, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -func (v *HeadBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadBucketAPIClient is a client that implements the HeadBucket operation. -type HeadBucketAPIClient interface { - HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) -} - -var _ HeadBucketAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "HeadBucket", - } -} - -// getHeadBucketBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getHeadBucketBucketMember(input interface{}) (*string, bool) { - in := input.(*HeadBucketInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addHeadBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getHeadBucketBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignHeadBucket is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignHeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &HeadBucketInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "HeadBucket", params, clientOptFns, - c.client.addOperationHeadBucketMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addHeadBucketPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addHeadBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go deleted file mode 100644 index bf93bcfe1000..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ /dev/null @@ -1,1254 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "errors" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - smithywaiter "github.com/aws/smithy-go/waiter" - "time" -) - -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're interested only in an object's -// metadata. -// -// A HEAD request has the same options as a GET operation on an object. The -// response is identical to the GET response except that there is no response -// body. Because of this, if the HEAD request generates an error, it returns a -// generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 -// Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not -// possible to retrieve the exact exception of these error codes. -// -// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers]. -// -// Permissions -// -// - General purpose bucket permissions - To use HEAD , you must have the -// s3:GetObject permission. You need the relevant read object (or version) -// permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3]in the Amazon S3 -// User Guide. For more information about the permissions to S3 API operations by -// S3 resource types, see Required permissions for Amazon S3 API operationsin the Amazon S3 User Guide. -// -// If the object you request doesn't exist, the error that Amazon S3 returns -// -// depends on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden error. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If you enable x-amz-checksum-mode in the request and the object is encrypted -// -// with Amazon Web Services Key Management Service (Amazon Web Services KMS), you -// must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM -// identity-based policies and KMS key policies for the KMS key to retrieve the -// checksum of the object. -// -// Encryption Encryption request headers, like x-amz-server-side-encryption , -// should not be sent for HEAD requests if your object uses server-side encryption -// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side -// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side -// encryption with Amazon S3 managed encryption keys (SSE-S3). The -// x-amz-server-side-encryption header is used when you PUT an object to S3 and -// want to specify the encryption method. If you include this header in a HEAD -// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad -// Request error. It's because the encryption method can't be changed when you -// retrieve the object. -// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when you -// retrieve the metadata from the object, you must use the following headers to -// provide the encryption key for the server to be able to retrieve the object's -// metadata. The headers are: -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. -// -// Directory bucket - For directory buckets, there are only two supported options -// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more -// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. -// -// Versioning -// -// - If the current version of the object is a delete marker, Amazon S3 behaves -// as if the object was deleted and includes x-amz-delete-marker: true in the -// response. -// -// - If the specified version is a delete marker, the response returns a 405 -// Method Not Allowed error and the Last-Modified: timestamp response header. -// -// - Directory buckets - Delete marker is not supported for directory buckets. -// -// - Directory buckets - S3 Versioning isn't enabled and supported for directory -// buckets. For this API operation, only the null value of the version ID is -// supported by directory buckets. You can only specify null to the versionId -// query parameter in the request. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// For directory buckets, you must make requests for this API operation to the -// Zonal endpoint. These endpoints support virtual-hosted-style requests in the -// format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// The following actions are related to HeadObject : -// -// [GetObject] -// -// [GetObjectAttributes] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { - if params == nil { - params = &HeadObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "HeadObject", params, optFns, c.addOperationHeadObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*HeadObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type HeadObjectInput struct { - - // The name of the bucket that contains the object. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The object key. - // - // This member is required. - Key *string - - // To retrieve the checksum, this parameter must be enabled. - // - // General purpose buckets - If you enable checksum mode and the object is - // uploaded with a [checksum]and encrypted with an Key Management Service (KMS) key, you - // must have permission to use the kms:Decrypt action to retrieve the checksum. - // - // Directory buckets - If you enable ChecksumMode and the object is encrypted with - // Amazon Web Services Key Management Service (Amazon Web Services KMS), you must - // also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM - // identity-based policies and KMS key policies for the KMS key to retrieve the - // checksum of the object. - // - // [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html - ChecksumMode types.ChecksumMode - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: - // - // - If-Match condition evaluates to true , and; - // - // - If-Unmodified-Since condition evaluates to false ; - // - // Then Amazon S3 returns 200 OK and the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: - // - // - If-None-Match condition evaluates to false , and; - // - // - If-Modified-Since condition evaluates to true ; - // - // Then Amazon S3 returns the 304 Not Modified response code. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfModifiedSince *time.Time - - // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: - // - // - If-None-Match condition evaluates to false , and; - // - // - If-Modified-Since condition evaluates to true ; - // - // Then Amazon S3 returns the 304 Not Modified response code. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: - // - // - If-Match condition evaluates to true , and; - // - // - If-Unmodified-Since condition evaluates to false ; - // - // Then Amazon S3 returns 200 OK and the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfUnmodifiedSince *time.Time - - // Part number of the object being read. This is a positive integer between 1 and - // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. - // Useful querying about the size of the part and the number of parts in this - // object. - PartNumber *int32 - - // HeadObject returns only the metadata for an object. If the Range is - // satisfiable, only the ContentLength is affected in the response. If the Range - // is not satisfiable, S3 returns a 416 - Requested Range Not Satisfiable error. - Range *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Sets the Cache-Control header of the response. - ResponseCacheControl *string - - // Sets the Content-Disposition header of the response. - ResponseContentDisposition *string - - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string - - // Sets the Content-Language header of the response. - ResponseContentLanguage *string - - // Sets the Content-Type header of the response. - ResponseContentType *string - - // Sets the Expires header of the response. - ResponseExpires *time.Time - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // Version ID used to reference a specific version of the object. - // - // For directory buckets in this API operation, only the null value of the version - // ID is supported. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type HeadObjectOutput struct { - - // Indicates that a range of bytes was specified. - AcceptRanges *string - - // The archive state of the head object. - // - // This functionality is not supported for directory buckets. - ArchiveStatus types.ArchiveStatus - - // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only - // present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more - // information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. You can use this header - // response to verify that the checksum type that is received is the same checksum - // type that was specified in CreateMultipartUpload request. For more information, - // see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Specifies presentational information for the object. - ContentDisposition *string - - // Indicates what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // Size of the body in bytes. - ContentLength *int64 - - // The portion of the object returned in the response for a GET request. - ContentRange *string - - // A standard MIME type describing the format of the object data. - ContentType *string - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - // - // This functionality is not supported for directory buckets. - DeleteMarker *bool - - // An entity tag (ETag) is an opaque identifier assigned by a web server to a - // specific version of a resource found at a URL. - ETag *string - - // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - // - // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html - Expiration *string - - // The date and time at which the object is no longer cacheable. - // - // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using - // the ExpiresString field which contains the unparsed value from the service - // response. - Expires *time.Time - - // The unparsed value of the Expires field from the service response. Prefer use - // of this value over the normal Expires response field where possible. - ExpiresString *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // A map of metadata to store with the object in S3. - // - // Map keys will be normalized to lower-case. - Metadata map[string]string - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. - // - // This functionality is not supported for directory buckets. - MissingMeta *int32 - - // Specifies whether a legal hold is in effect for this object. This header is - // only returned if the requester has the s3:GetObjectLegalHold permission. This - // header is not returned if the specified version of this object has never had a - // legal hold applied. For more information about S3 Object Lock, see [Object Lock]. - // - // This functionality is not supported for directory buckets. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode, if any, that's in effect for this object. This header is - // only returned if the requester has the s3:GetObjectRetention permission. For - // more information about S3 Object Lock, see [Object Lock]. - // - // This functionality is not supported for directory buckets. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html - ObjectLockMode types.ObjectLockMode - - // The date and time when the Object Lock retention period expires. This header is - // only returned if the requester has the s3:GetObjectRetention permission. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // The count of parts this object has. This value is only returned if you specify - // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount *int32 - - // Amazon S3 can return this header if your request involves a bucket that is - // either a source or a destination in a replication rule. - // - // In replication, you have a source bucket on which you configure replication and - // destination bucket or buckets where Amazon S3 stores object replicas. When you - // request an object ( GetObject ) or object metadata ( HeadObject ) from these - // buckets, Amazon S3 will return the x-amz-replication-status header in the - // response as follows: - // - // - If requesting an object from the source bucket, Amazon S3 will return the - // x-amz-replication-status header if the object in your request is eligible for - // replication. - // - // For example, suppose that in your replication configuration, you specify object - // prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix - // TaxDocs . Any objects you upload with this key name prefix, for example - // TaxDocs/document1.pdf , are eligible for replication. For any object request - // with this key name prefix, Amazon S3 will return the x-amz-replication-status - // header with value PENDING, COMPLETED or FAILED indicating object replication - // status. - // - // - If requesting an object from a destination bucket, Amazon S3 will return - // the x-amz-replication-status header with value REPLICA if the object in your - // request is a replica that Amazon S3 created and there is no replica modification - // replication in progress. - // - // - When replicating objects to multiple destination buckets, the - // x-amz-replication-status header acts differently. The header of the source - // object will only return a value of COMPLETED when replication is successful to - // all destinations. The header will remain at value PENDING until replication has - // completed for all destinations. If one or more destinations fails replication - // the header will return FAILED. - // - // For more information, see [Replication]. - // - // This functionality is not supported for directory buckets. - // - // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - ReplicationStatus types.ReplicationStatus - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If the object is an archived object (an object whose storage class is GLACIER), - // the response includes this header if either the archive restoration is in - // progress (see [RestoreObject]or an archive copy is already restored. - // - // If an archive copy is already restored, the header value indicates when Amazon - // S3 is scheduled to delete the object copy. For example: - // - // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 - // GMT" - // - // If the object restoration is in progress, the header returns the value - // ongoing-request="true" . - // - // For more information about archiving objects, see [Transitioning Objects: General Considerations]. - // - // This functionality is not supported for directory buckets. Directory buckets - // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in - // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage - // class) in Dedicated Local Zones. - // - // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations - // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html - Restore *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. - // - // For more information, see [Storage Classes]. - // - // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 - // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 - // One Zone-Infrequent Access storage class) in Dedicated Local Zones. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The number of tags, if any, on the object, when you have the relevant - // permission to read object tags. - // - // You can use [GetObjectTagging] to retrieve the tag set associated with an object. - // - // This functionality is not supported for directory buckets. - // - // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html - TagCount *int32 - - // Version ID of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. - // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "HeadObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpHeadObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter -type ObjectExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note - // that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) -} - -// ObjectExistsWaiter defines the waiters for ObjectExists -type ObjectExistsWaiter struct { - client HeadObjectAPIClient - - options ObjectExistsWaiterOptions -} - -// NewObjectExistsWaiter constructs a ObjectExistsWaiter. -func NewObjectExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectExistsWaiterOptions)) *ObjectExistsWaiter { - options := ObjectExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = objectExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &ObjectExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for ObjectExists waiter. The maxWaitDur is the -// maximum wait duration the waiter will wait. The maxWaitDur is required and must -// be greater than zero. -func (w *ObjectExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for ObjectExists waiter and returns the -// output of the successful operation. The maxWaitDur is the maximum wait duration -// the waiter will wait. The maxWaitDur is required and must be greater than zero. -func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) (*HeadObjectOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadObject(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for ObjectExists waiter") -} - -func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { - - if err == nil { - return false, nil - } - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return true, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -// ObjectNotExistsWaiterOptions are waiter options for ObjectNotExistsWaiter -type ObjectNotExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. - // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) -} - -// ObjectNotExistsWaiter defines the waiters for ObjectNotExists -type ObjectNotExistsWaiter struct { - client HeadObjectAPIClient - - options ObjectNotExistsWaiterOptions -} - -// NewObjectNotExistsWaiter constructs a ObjectNotExistsWaiter. -func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectNotExistsWaiterOptions)) *ObjectNotExistsWaiter { - options := ObjectNotExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = objectNotExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &ObjectNotExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is -// the maximum wait duration the waiter will wait. The maxWaitDur is required and -// must be greater than zero. -func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for ObjectNotExists waiter and returns -// the output of the successful operation. The maxWaitDur is the maximum wait -// duration the waiter will wait. The maxWaitDur is required and must be greater -// than zero. -func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) (*HeadObjectOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadObject(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for ObjectNotExists waiter") -} - -func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return false, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -func (v *HeadObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadObjectAPIClient is a client that implements the HeadObject operation. -type HeadObjectAPIClient interface { - HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) -} - -var _ HeadObjectAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "HeadObject", - } -} - -// getHeadObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getHeadObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*HeadObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addHeadObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getHeadObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignHeadObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignHeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &HeadObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "HeadObject", params, clientOptFns, - c.client.addOperationHeadObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addHeadObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addHeadObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go deleted file mode 100644 index 985bfc486888..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ /dev/null @@ -1,306 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Lists the analytics configurations for the bucket. You can have up to 1,000 -// analytics configurations per bucket. -// -// This action supports list pagination and does not return more than 100 -// configurations at a time. You should always check the IsTruncated element in -// the response. If there are no more configurations to list, IsTruncated is set -// to false. If there are more configurations to list, IsTruncated is set to true, -// and there will be a value in NextContinuationToken . You use the -// NextContinuationToken value to continue the pagination of the list by passing -// the value in continuation-token in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the -// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. -// -// The following operations are related to ListBucketAnalyticsConfigurations : -// -// [GetBucketAnalyticsConfiguration] -// -// [DeleteBucketAnalyticsConfiguration] -// -// [PutBucketAnalyticsConfiguration] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html -// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { - if params == nil { - params = &ListBucketAnalyticsConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketAnalyticsConfigurations", params, optFns, c.addOperationListBucketAnalyticsConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketAnalyticsConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketAnalyticsConfigurationsInput struct { - - // The name of the bucket from which analytics configurations are retrieved. - // - // This member is required. - Bucket *string - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListBucketAnalyticsConfigurationsOutput struct { - - // The list of analytics configurations for a bucket. - AnalyticsConfigurationList []types.AnalyticsConfiguration - - // The marker that is used as a starting point for this analytics configuration - // list response. This value is present if it was sent in the request. - ContinuationToken *string - - // Indicates whether the returned list of analytics configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken will be provided for a subsequent request. - IsTruncated *bool - - // NextContinuationToken is sent when isTruncated is true, which indicates that - // there are more analytics configurations to list. The next request must include - // this NextContinuationToken . The token is obfuscated and is not a usable value. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketAnalyticsConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketAnalyticsConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketAnalyticsConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketAnalyticsConfigurations", - } -} - -// getListBucketAnalyticsConfigurationsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getListBucketAnalyticsConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketAnalyticsConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketAnalyticsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketAnalyticsConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go deleted file mode 100644 index c800119a1475..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Lists the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// Operations related to ListBucketIntelligentTieringConfigurations include: -// -// [DeleteBucketIntelligentTieringConfiguration] -// -// [PutBucketIntelligentTieringConfiguration] -// -// [GetBucketIntelligentTieringConfiguration] -// -// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html -// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html -func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { - if params == nil { - params = &ListBucketIntelligentTieringConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketIntelligentTieringConfigurations", params, optFns, c.addOperationListBucketIntelligentTieringConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketIntelligentTieringConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketIntelligentTieringConfigurationsInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // This member is required. - Bucket *string - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListBucketIntelligentTieringConfigurationsOutput struct { - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string - - // The list of S3 Intelligent-Tiering configurations for a bucket. - IntelligentTieringConfigurationList []types.IntelligentTieringConfiguration - - // Indicates whether the returned list of analytics configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken will be provided for a subsequent request. - IsTruncated *bool - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketIntelligentTieringConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketIntelligentTieringConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketIntelligentTieringConfigurations", - } -} - -// getListBucketIntelligentTieringConfigurationsBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getListBucketIntelligentTieringConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketIntelligentTieringConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketIntelligentTieringConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go deleted file mode 100644 index 90d3a33bc0c7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns a list of S3 Inventory configurations for the bucket. You can have up -// to 1,000 analytics configurations per bucket. -// -// This action supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. If -// there are more configurations to list, IsTruncated is set to true, and there is -// a value in NextContinuationToken . You use the NextContinuationToken value to -// continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the -// s3:GetInventoryConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory] -// -// The following operations are related to ListBucketInventoryConfigurations : -// -// [GetBucketInventoryConfiguration] -// -// [DeleteBucketInventoryConfiguration] -// -// [PutBucketInventoryConfiguration] -// -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html -// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html -func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { - if params == nil { - params = &ListBucketInventoryConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketInventoryConfigurations", params, optFns, c.addOperationListBucketInventoryConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketInventoryConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketInventoryConfigurationsInput struct { - - // The name of the bucket containing the inventory configurations to retrieve. - // - // This member is required. - Bucket *string - - // The marker used to continue an inventory configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value that - // Amazon S3 understands. - ContinuationToken *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListBucketInventoryConfigurationsOutput struct { - - // If sent in the request, the marker that is used as a starting point for this - // inventory configuration list response. - ContinuationToken *string - - // The list of inventory configurations for a bucket. - InventoryConfigurationList []types.InventoryConfiguration - - // Tells whether the returned list of inventory configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken is provided for a subsequent request. - IsTruncated *bool - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketInventoryConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketInventoryConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketInventoryConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketInventoryConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketInventoryConfigurations", - } -} - -// getListBucketInventoryConfigurationsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getListBucketInventoryConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketInventoryConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketInventoryConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketInventoryConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go deleted file mode 100644 index 4f9402d666cd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go +++ /dev/null @@ -1,310 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Lists the metrics configurations for the bucket. The metrics configurations are -// only for the request metrics of the bucket and do not provide information on -// daily storage metrics. You can have up to 1,000 configurations per bucket. -// -// This action supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. If -// there are more configurations to list, IsTruncated is set to true, and there is -// a value in NextContinuationToken . You use the NextContinuationToken value to -// continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the -// s3:GetMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For more information about metrics configurations and CloudWatch request -// metrics, see [Monitoring Metrics with Amazon CloudWatch]. -// -// The following operations are related to ListBucketMetricsConfigurations : -// -// [PutBucketMetricsConfiguration] -// -// [GetBucketMetricsConfiguration] -// -// [DeleteBucketMetricsConfiguration] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { - if params == nil { - params = &ListBucketMetricsConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketMetricsConfigurations", params, optFns, c.addOperationListBucketMetricsConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketMetricsConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketMetricsConfigurationsInput struct { - - // The name of the bucket containing the metrics configurations to retrieve. - // - // This member is required. - Bucket *string - - // The marker that is used to continue a metrics configuration listing that has - // been truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value that - // Amazon S3 understands. - ContinuationToken *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type ListBucketMetricsConfigurationsOutput struct { - - // The marker that is used as a starting point for this metrics configuration list - // response. This value is present if it was sent in the request. - ContinuationToken *string - - // Indicates whether the returned list of metrics configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken will be provided for a subsequent request. - IsTruncated *bool - - // The list of metrics configurations for a bucket. - MetricsConfigurationList []types.MetricsConfiguration - - // The marker used to continue a metrics configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value that - // Amazon S3 understands. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketMetricsConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketMetricsConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketMetricsConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketMetricsConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketMetricsConfigurations", - } -} - -// getListBucketMetricsConfigurationsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getListBucketMetricsConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketMetricsConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketMetricsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketMetricsConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go deleted file mode 100644 index 21c03cd37481..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go +++ /dev/null @@ -1,390 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation is not supported for directory buckets. -// -// Returns a list of all buckets owned by the authenticated sender of the request. -// To grant IAM permission to use this operation, you must add the -// s3:ListAllMyBuckets policy action. -// -// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets]. -// -// We strongly recommend using only paginated ListBuckets requests. Unpaginated -// ListBuckets requests are only supported for Amazon Web Services accounts set to -// the default general purpose bucket quota of 10,000. If you have an approved -// general purpose bucket quota above 10,000, you must send paginated ListBuckets -// requests to list your account’s buckets. All unpaginated ListBuckets requests -// will be rejected for Amazon Web Services accounts with a general purpose bucket -// quota greater than 10,000. -// -// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html -func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { - if params == nil { - params = &ListBucketsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, c.addOperationListBucketsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketsInput struct { - - // Limits the response to buckets that are located in the specified Amazon Web - // Services Region. The Amazon Web Services Region must be expressed according to - // the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) - // Region. For a list of the valid values for all of the Amazon Web Services - // Regions, see [Regions and Endpoints]. - // - // Requests made to a Regional endpoint that is different from the bucket-region - // parameter are not supported. For example, if you want to limit the response to - // your buckets in Region us-west-2 , the request must be made to an endpoint in - // Region us-west-2 . - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - BucketRegion *string - - // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. You can use this ContinuationToken for pagination of the list results. - // - // Length Constraints: Minimum length of 0. Maximum length of 1024. - // - // Required: No. - // - // If you specify the bucket-region , prefix , or continuation-token query - // parameters without using max-buckets to set the maximum number of buckets - // returned in the response, Amazon S3 applies a default page size of 10,000 and - // provides a continuation token if there are more buckets. - ContinuationToken *string - - // Maximum number of buckets to be returned in response. When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - MaxBuckets *int32 - - // Limits the response to bucket names that begin with the specified bucket name - // prefix. - Prefix *string - - noSmithyDocumentSerde -} - -type ListBucketsOutput struct { - - // The list of buckets owned by the requester. - Buckets []types.Bucket - - // ContinuationToken is included in the response when there are more buckets that - // can be listed with pagination. The next ListBuckets request to Amazon S3 can be - // continued with this ContinuationToken . ContinuationToken is obfuscated and is - // not a real bucket. - ContinuationToken *string - - // The owner of the buckets listed. - Owner *types.Owner - - // If Prefix was sent with the request, it is included in the response. - // - // All bucket names in the response begin with the specified bucket name prefix. - Prefix *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBuckets{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBuckets"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListBucketsPaginatorOptions is the paginator options for ListBuckets -type ListBucketsPaginatorOptions struct { - // Maximum number of buckets to be returned in response. When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListBucketsPaginator is a paginator for ListBuckets -type ListBucketsPaginator struct { - options ListBucketsPaginatorOptions - client ListBucketsAPIClient - params *ListBucketsInput - nextToken *string - firstPage bool -} - -// NewListBucketsPaginator returns a new ListBucketsPaginator -func NewListBucketsPaginator(client ListBucketsAPIClient, params *ListBucketsInput, optFns ...func(*ListBucketsPaginatorOptions)) *ListBucketsPaginator { - if params == nil { - params = &ListBucketsInput{} - } - - options := ListBucketsPaginatorOptions{} - if params.MaxBuckets != nil { - options.Limit = *params.MaxBuckets - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListBucketsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.ContinuationToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListBucketsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListBuckets page. -func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.ContinuationToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxBuckets = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListBuckets(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.ContinuationToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListBucketsAPIClient is a client that implements the ListBuckets operation. -type ListBucketsAPIClient interface { - ListBuckets(context.Context, *ListBucketsInput, ...func(*Options)) (*ListBucketsOutput, error) -} - -var _ ListBucketsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBuckets", - } -} - -func addListBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: nopGetBucketAccessor, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: false, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go deleted file mode 100644 index edf98a062b1e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a list of all Amazon S3 directory buckets owned by the authenticated -// sender of the request. For more information about directory buckets, see [Directory buckets]in the -// Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in -// an IAM identity-based policy instead of a bucket policy. Cross-account access to -// this API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// The BucketRegion response element is not part of the ListDirectoryBuckets -// Response Syntax. -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { - if params == nil { - params = &ListDirectoryBucketsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListDirectoryBucketsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListDirectoryBucketsInput struct { - - // ContinuationToken indicates to Amazon S3 that the list is being continued on - // buckets in this account with a token. ContinuationToken is obfuscated and is - // not a real bucket name. You can use this ContinuationToken for the pagination - // of the list results. - ContinuationToken *string - - // Maximum number of buckets to be returned in response. When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - MaxDirectoryBuckets *int32 - - noSmithyDocumentSerde -} - -func (in *ListDirectoryBucketsInput) bindEndpointParams(p *EndpointParameters) { - - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListDirectoryBucketsOutput struct { - - // The list of buckets owned by the requester. - Buckets []types.Bucket - - // If ContinuationToken was sent with the request, it is included in the response. - // You can use the returned ContinuationToken for pagination of the list response. - ContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListDirectoryBuckets{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListDirectoryBuckets{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListDirectoryBuckets"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListDirectoryBucketsPaginatorOptions is the paginator options for -// ListDirectoryBuckets -type ListDirectoryBucketsPaginatorOptions struct { - // Maximum number of buckets to be returned in response. When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListDirectoryBucketsPaginator is a paginator for ListDirectoryBuckets -type ListDirectoryBucketsPaginator struct { - options ListDirectoryBucketsPaginatorOptions - client ListDirectoryBucketsAPIClient - params *ListDirectoryBucketsInput - nextToken *string - firstPage bool -} - -// NewListDirectoryBucketsPaginator returns a new ListDirectoryBucketsPaginator -func NewListDirectoryBucketsPaginator(client ListDirectoryBucketsAPIClient, params *ListDirectoryBucketsInput, optFns ...func(*ListDirectoryBucketsPaginatorOptions)) *ListDirectoryBucketsPaginator { - if params == nil { - params = &ListDirectoryBucketsInput{} - } - - options := ListDirectoryBucketsPaginatorOptions{} - if params.MaxDirectoryBuckets != nil { - options.Limit = *params.MaxDirectoryBuckets - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListDirectoryBucketsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.ContinuationToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListDirectoryBucketsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListDirectoryBuckets page. -func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.ContinuationToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxDirectoryBuckets = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListDirectoryBuckets(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.ContinuationToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListDirectoryBucketsAPIClient is a client that implements the -// ListDirectoryBuckets operation. -type ListDirectoryBucketsAPIClient interface { - ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) -} - -var _ ListDirectoryBucketsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListDirectoryBuckets", - } -} - -func addListDirectoryBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: nopGetBucketAccessor, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go deleted file mode 100644 index 3de297cc643f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ /dev/null @@ -1,558 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation lists in-progress multipart uploads in a bucket. An in-progress -// multipart upload is a multipart upload that has been initiated by the -// CreateMultipartUpload request, but has not yet been completed or aborted. -// -// Directory buckets - If multipart uploads in a directory bucket are in progress, -// you can't delete the bucket until all the in-progress multipart uploads are -// aborted or completed. To delete these in-progress multipart uploads, use the -// ListMultipartUploads operation to list the in-progress multipart uploads in the -// bucket and use the AbortMultipartUpload operation to abort all the in-progress -// multipart uploads. -// -// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads -// in the response. The limit of 1,000 multipart uploads is also the default value. -// You can further limit the number of uploads in a response by specifying the -// max-uploads request parameter. If there are more than 1,000 multipart uploads -// that satisfy your ListMultipartUploads request, the response returns an -// IsTruncated element with the value of true , a NextKeyMarker element, and a -// NextUploadIdMarker element. To list the remaining multipart uploads, you need to -// make subsequent ListMultipartUploads requests. In these requests, include two -// query parameters: key-marker and upload-id-marker . Set the value of key-marker -// to the NextKeyMarker value from the previous response. Similarly, set the value -// of upload-id-marker to the NextUploadIdMarker value from the previous response. -// -// Directory buckets - The upload-id-marker element and the NextUploadIdMarker -// element aren't supported by directory buckets. To list the additional multipart -// uploads, you only need to set the value of key-marker to the NextKeyMarker -// value from the previous response. -// -// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Sorting of multipart uploads in response -// -// - General purpose bucket - In the ListMultipartUploads response, the multipart -// uploads are sorted based on two criteria: -// -// - Key-based sorting - Multipart uploads are initially sorted in ascending -// order based on their object keys. -// -// - Time-based sorting - For uploads that share the same object key, they are -// further sorted in ascending order based on the upload initiation time. Among -// uploads with the same key, the one that was initiated first will appear before -// the ones that were initiated later. -// -// - Directory bucket - In the ListMultipartUploads response, the multipart -// uploads aren't sorted lexicographically based on the object keys. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to ListMultipartUploads : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [ListParts] -// -// [AbortMultipartUpload] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { - if params == nil { - params = &ListMultipartUploadsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, c.addOperationListMultipartUploadsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListMultipartUploadsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListMultipartUploadsInput struct { - - // The name of the bucket to which the multipart upload was initiated. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Character you use to group keys. - // - // All keys that contain the same string between the prefix, if specified, and the - // first occurrence of the delimiter after the prefix are grouped under a single - // result element, CommonPrefixes . If you don't specify the prefix parameter, then - // the substring starts at the beginning of the key. The keys that are grouped - // under CommonPrefixes result element are not returned elsewhere in the response. - // - // CommonPrefixes is filtered out from results if it is not lexicographically - // greater than the key-marker. - // - // Directory buckets - For directory buckets, / is the only supported delimiter. - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Specifies the multipart upload after which listing should begin. - // - // - General purpose buckets - For general purpose buckets, key-marker is an - // object key. Together with upload-id-marker , this parameter specifies the - // multipart upload after which listing should begin. - // - // If upload-id-marker is not specified, only the keys lexicographically greater - // than the specified key-marker will be included in the list. - // - // If upload-id-marker is specified, any multipart uploads for a key equal to the - // key-marker might also be included, provided those multipart uploads have - // upload IDs lexicographically greater than the specified upload-id-marker . - // - // - Directory buckets - For directory buckets, key-marker is obfuscated and - // isn't a real object key. The upload-id-marker parameter isn't supported by - // directory buckets. To list the additional multipart uploads, you only need to - // set the value of key-marker to the NextKeyMarker value from the previous - // response. - // - // In the ListMultipartUploads response, the multipart uploads aren't sorted - // lexicographically based on the object keys. - KeyMarker *string - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the - // response body. 1,000 is the maximum number of uploads that can be returned in a - // response. - MaxUploads *int32 - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different grouping of - // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter is - // ignored. Otherwise, any multipart uploads for a key equal to the key-marker - // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . - // - // This functionality is not supported for directory buckets. - UploadIdMarker *string - - noSmithyDocumentSerde -} - -func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListMultipartUploadsOutput struct { - - // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. - Bucket *string - - // If you specify a delimiter in the request, then the result returns each - // distinct key prefix containing the delimiter in a CommonPrefixes element. The - // distinct key prefixes are returned in the Prefix child element. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - CommonPrefixes []types.CommonPrefix - - // Contains the delimiter you specified in the request. If you don't specify a - // delimiter in your request, this element is absent from the response. - // - // Directory buckets - For directory buckets, / is the only supported delimiter. - Delimiter *string - - // Encoding type used by Amazon S3 to encode object keys in the response. - // - // If you specify the encoding-type request parameter, Amazon S3 includes this - // element in the response, and returns encoded key name values in the following - // response elements: - // - // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . - EncodingType types.EncodingType - - // Indicates whether the returned list of multipart uploads is truncated. A value - // of true indicates that the list was truncated. The list can be truncated if the - // number of multipart uploads exceeds the limit allowed or specified by max - // uploads. - IsTruncated *bool - - // The key at or after which the listing began. - KeyMarker *string - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int32 - - // When a list is truncated, this element specifies the value that should be used - // for the key-marker request parameter in a subsequent request. - NextKeyMarker *string - - // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. - // - // This functionality is not supported for directory buckets. - NextUploadIdMarker *string - - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter is - // ignored. Otherwise, any multipart uploads for a key equal to the key-marker - // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . - // - // This functionality is not supported for directory buckets. - UploadIdMarker *string - - // Container for elements related to a particular multipart upload. A response can - // contain zero or more Upload elements. - Uploads []types.MultipartUpload - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListMultipartUploads{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListMultipartUploads"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListMultipartUploadsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListMultipartUploads", - } -} - -// getListMultipartUploadsBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getListMultipartUploadsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListMultipartUploadsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListMultipartUploadsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListMultipartUploadsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go deleted file mode 100644 index 1d443236610d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ /dev/null @@ -1,423 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation is not supported for directory buckets. -// -// Returns metadata about all versions of the objects in a bucket. You can also -// use request parameters as selection criteria to return metadata about a subset -// of all the object versions. -// -// To use this operation, you must have permission to perform the -// s3:ListBucketVersions action. Be aware of the name difference. -// -// A 200 OK response can contain valid or invalid XML. Make sure to design your -// application to parse the contents of the response and handle it appropriately. -// -// To use this operation, you must have READ access to the bucket. -// -// The following operations are related to ListObjectVersions : -// -// [ListObjectsV2] -// -// [GetObject] -// -// [PutObject] -// -// [DeleteObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html -func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { - if params == nil { - params = &ListObjectVersionsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListObjectVersions", params, optFns, c.addOperationListObjectVersionsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListObjectVersionsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListObjectVersionsInput struct { - - // The bucket name that contains the objects. - // - // This member is required. - Bucket *string - - // A delimiter is a character that you specify to group keys. All keys that - // contain the same string between the prefix and the first occurrence of the - // delimiter are grouped under a single result element in CommonPrefixes . These - // groups are counted as one result against the max-keys limitation. These keys - // are not returned elsewhere in the response. - // - // CommonPrefixes is filtered out from results if it is not lexicographically - // greater than the key-marker. - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. If additional keys satisfy the search criteria, but - // were not returned because max-keys was exceeded, the response contains true . To - // return the additional keys, see key-marker and version-id-marker . - MaxKeys *int32 - - // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. - OptionalObjectAttributes []types.OptionalObjectAttributes - - // Use this parameter to select only those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different groupings of - // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) You can use prefix with delimiter to roll up - // numerous objects into a single result under CommonPrefixes . - Prefix *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the object version you want to start listing from. - VersionIdMarker *string - - noSmithyDocumentSerde -} - -func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListObjectVersionsOutput struct { - - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. - CommonPrefixes []types.CommonPrefix - - // Container for an object that is a delete marker. To learn more about delete - // markers, see [Working with delete markers]. - // - // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html - DeleteMarkers []types.DeleteMarkerEntry - - // The delimiter grouping the included keys. A delimiter is a character that you - // specify to group keys. All keys that contain the same string between the prefix - // and the first occurrence of the delimiter are grouped under a single result - // element in CommonPrefixes . These groups are counted as one result against the - // max-keys limitation. These keys are not returned elsewhere in the response. - Delimiter *string - - // Encoding type used by Amazon S3 to encode object key names in the XML response. - // - // If you specify the encoding-type request parameter, Amazon S3 includes this - // element in the response, and returns encoded key name values in the following - // response elements: - // - // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . - EncodingType types.EncodingType - - // A flag that indicates whether Amazon S3 returned all of the results that - // satisfied the search criteria. If your results were truncated, you can make a - // follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the rest of - // the results. - IsTruncated *bool - - // Marks the last key returned in a truncated response. - KeyMarker *string - - // Specifies the maximum number of objects to return. - MaxKeys *int32 - - // The bucket name. - Name *string - - // When the number of responses exceeds the value of MaxKeys , NextKeyMarker - // specifies the first key not returned that satisfies the search criteria. Use - // this value for the key-marker request parameter in a subsequent request. - NextKeyMarker *string - - // When the number of responses exceeds the value of MaxKeys , NextVersionIdMarker - // specifies the first object version not returned that satisfies the search - // criteria. Use this value for the version-id-marker request parameter in a - // subsequent request. - NextVersionIdMarker *string - - // Selects objects that start with the value supplied by this parameter. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Marks the last version of the key returned in a truncated response. - VersionIdMarker *string - - // Container for version information. - Versions []types.ObjectVersion - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectVersions{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectVersions"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectVersions(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListObjectVersionsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListObjectVersions", - } -} - -// getListObjectVersionsBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getListObjectVersionsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListObjectVersionsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListObjectVersionsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListObjectVersionsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go deleted file mode 100644 index abb77c5089f3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go +++ /dev/null @@ -1,447 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation is not supported for directory buckets. -// -// Returns some or all (up to 1,000) of the objects in a bucket. You can use the -// request parameters as selection criteria to return a subset of the objects in a -// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design -// your application to parse the contents of the response and handle it -// appropriately. -// -// This action has been revised. We recommend that you use the newer version, [ListObjectsV2], -// when developing applications. For backward compatibility, Amazon S3 continues to -// support ListObjects . -// -// The following operations are related to ListObjects : -// -// [ListObjectsV2] -// -// [GetObject] -// -// [PutObject] -// -// [CreateBucket] -// -// [ListBuckets] -// -// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html -func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { - if params == nil { - params = &ListObjectsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListObjects", params, optFns, c.addOperationListObjectsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListObjectsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListObjectsInput struct { - - // The name of the bucket containing the objects. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // A delimiter is a character that you use to group keys. - // - // CommonPrefixes is filtered out from results if it is not lexicographically - // greater than the key-marker. - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. Marker can be any key in the bucket. - Marker *string - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - MaxKeys *int32 - - // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. - OptionalObjectAttributes []types.OptionalObjectAttributes - - // Limits the response to keys that begin with the specified prefix. - Prefix *string - - // Confirms that the requester knows that she or he will be charged for the list - // objects request. Bucket owners need not specify this parameter in their - // requests. - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListObjectsOutput struct { - - // All of the keys (up to 1,000) rolled up in a common prefix count as a single - // return when calculating the number of returns. - // - // A response can contain CommonPrefixes only if you specify a delimiter. - // - // CommonPrefixes contains all (if there are any) keys between Prefix and the next - // occurrence of the string specified by the delimiter. - // - // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . - // - // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in - // notes/summer/july , the common prefix is notes/summer/ . All of the keys that - // roll up into a common prefix count as a single return when calculating the - // number of returns. - CommonPrefixes []types.CommonPrefix - - // Metadata about each object returned. - Contents []types.Object - - // Causes keys that contain the same string between the prefix and the first - // occurrence of the delimiter to be rolled up into a single result element in the - // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in - // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // A flag that indicates whether Amazon S3 returned all of the results that - // satisfied the search criteria. - IsTruncated *bool - - // Indicates where in the bucket listing begins. Marker is included in the - // response if it was sent with the request. - Marker *string - - // The maximum number of keys returned in the response body. - MaxKeys *int32 - - // The bucket name. - Name *string - - // When the response is truncated (the IsTruncated element value in the response - // is true ), you can use the key name in this field as the marker parameter in - // the subsequent request to get the next set of objects. Amazon S3 lists objects - // in alphabetical order. - // - // This element is returned only if you have the delimiter request parameter - // specified. If the response does not include the NextMarker element and it is - // truncated, you can use the value of the last Key element in the response as the - // marker parameter in the subsequent request to get the next set of object keys. - NextMarker *string - - // Keys that begin with the indicated prefix. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjects{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjects"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListObjectsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjects(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListObjectsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListObjectsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListObjects", - } -} - -// getListObjectsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getListObjectsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListObjectsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListObjectsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go deleted file mode 100644 index bf3a492df2af..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go +++ /dev/null @@ -1,627 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns some or all (up to 1,000) of the objects in a bucket with each request. -// You can use the request parameters as selection criteria to return a subset of -// the objects in a bucket. A 200 OK response can contain valid or invalid XML. -// Make sure to design your application to parse the contents of the response and -// handle it appropriately. For more information about listing objects, see [Listing object keys programmatically]in the -// Amazon S3 User Guide. To get a list of your buckets, see [ListBuckets]. -// -// - General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't -// return prefixes that are related only to in-progress multipart uploads. -// -// - Directory buckets - For directory buckets, ListObjectsV2 response includes -// the prefixes that are related only to in-progress multipart uploads. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - To use this operation, you must have -// READ access to the bucket. You must have permission to perform the -// s3:ListBucket action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] -// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Sorting order of returned objects -// -// - General purpose bucket - For general purpose buckets, ListObjectsV2 returns -// objects in lexicographical order based on their key names. -// -// - Directory bucket - For directory buckets, ListObjectsV2 does not return -// objects in lexicographical order. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// This section describes the latest revision of this action. We recommend that -// you use this revised API operation for application development. For backward -// compatibility, Amazon S3 continues to support the prior version of this API -// operation, [ListObjects]. -// -// The following operations are related to ListObjectsV2 : -// -// [GetObject] -// -// [PutObject] -// -// [CreateBucket] -// -// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html -// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { - if params == nil { - params = &ListObjectsV2Input{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListObjectsV2", params, optFns, c.addOperationListObjectsV2Middlewares) - if err != nil { - return nil, err - } - - out := result.(*ListObjectsV2Output) - out.ResultMetadata = metadata - return out, nil -} - -type ListObjectsV2Input struct { - - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. You can use this ContinuationToken for pagination of the list results. - ContinuationToken *string - - // A delimiter is a character that you use to group keys. - // - // CommonPrefixes is filtered out from results if it is not lexicographically - // greater than the StartAfter value. - // - // - Directory buckets - For directory buckets, / is the only supported delimiter. - // - // - Directory buckets - When you query ListObjectsV2 with a delimiter during - // in-progress multipart uploads, the CommonPrefixes response parameter contains - // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. - // - // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The owner field is not present in ListObjectsV2 by default. If you want to - // return the owner field with each key in the result, then set the FetchOwner - // field to true . - // - // Directory buckets - For directory buckets, the bucket owner is returned as the - // object owner for all objects. - FetchOwner *bool - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - MaxKeys *int32 - - // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. - // - // This functionality is not supported for directory buckets. - OptionalObjectAttributes []types.OptionalObjectAttributes - - // Limits the response to keys that begin with the specified prefix. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // Confirms that the requester knows that she or he will be charged for the list - // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. - // - // This functionality is not supported for directory buckets. - RequestPayer types.RequestPayer - - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. - // - // This functionality is not supported for directory buckets. - StartAfter *string - - noSmithyDocumentSerde -} - -func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListObjectsV2Output struct { - - // All of the keys (up to 1,000) that share the same prefix are grouped together. - // When counting the total numbers of returns by this API operation, this group of - // keys is considered as one item. - // - // A response can contain CommonPrefixes only if you specify a delimiter. - // - // CommonPrefixes contains all (if there are any) keys between Prefix and the next - // occurrence of the string specified by a delimiter. - // - // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . - // - // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in - // notes/summer/july , the common prefix is notes/summer/ . All of the keys that - // roll up into a common prefix count as a single return when calculating the - // number of returns. - // - // - Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - // - // - Directory buckets - When you query ListObjectsV2 with a delimiter during - // in-progress multipart uploads, the CommonPrefixes response parameter contains - // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. - // - // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html - CommonPrefixes []types.CommonPrefix - - // Metadata about each object returned. - Contents []types.Object - - // If ContinuationToken was sent with the request, it is included in the - // response. You can use the returned ContinuationToken for pagination of the list - // response. - ContinuationToken *string - - // Causes keys that contain the same string between the prefix and the first - // occurrence of the delimiter to be rolled up into a single result element in the - // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in - // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. - // - // Directory buckets - For directory buckets, / is the only supported delimiter. - Delimiter *string - - // Encoding type used by Amazon S3 to encode object key names in the XML response. - // - // If you specify the encoding-type request parameter, Amazon S3 includes this - // element in the response, and returns encoded key name values in the following - // response elements: - // - // Delimiter, Prefix, Key, and StartAfter . - EncodingType types.EncodingType - - // Set to false if all of the results were returned. Set to true if more keys are - // available to return. If the number of results exceeds that specified by MaxKeys - // , all of the results might not be returned. - IsTruncated *bool - - // KeyCount is the number of keys returned with this request. KeyCount will always - // be less than or equal to the MaxKeys field. For example, if you ask for 50 - // keys, your result will include 50 keys or fewer. - KeyCount *int32 - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - MaxKeys *int32 - - // The bucket name. - Name *string - - // NextContinuationToken is sent when isTruncated is true, which means there are - // more keys in the bucket that can be listed. The next list requests to Amazon S3 - // can be continued with this NextContinuationToken . NextContinuationToken is - // obfuscated and is not a real key - NextContinuationToken *string - - // Keys that begin with the indicated prefix. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If StartAfter was sent with the request, it is included in the response. - // - // This functionality is not supported for directory buckets. - StartAfter *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectsV2{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectsV2"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectsV2(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 -type ListObjectsV2PaginatorOptions struct { - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListObjectsV2Paginator is a paginator for ListObjectsV2 -type ListObjectsV2Paginator struct { - options ListObjectsV2PaginatorOptions - client ListObjectsV2APIClient - params *ListObjectsV2Input - nextToken *string - firstPage bool -} - -// NewListObjectsV2Paginator returns a new ListObjectsV2Paginator -func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObjectsV2Input, optFns ...func(*ListObjectsV2PaginatorOptions)) *ListObjectsV2Paginator { - if params == nil { - params = &ListObjectsV2Input{} - } - - options := ListObjectsV2PaginatorOptions{} - if params.MaxKeys != nil { - options.Limit = *params.MaxKeys - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListObjectsV2Paginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.ContinuationToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListObjectsV2Paginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListObjectsV2 page. -func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Output, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.ContinuationToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxKeys = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = nil - if result.IsTruncated != nil && *result.IsTruncated { - p.nextToken = result.NextContinuationToken - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func (v *ListObjectsV2Input) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. -type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) -} - -var _ ListObjectsV2APIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListObjectsV2", - } -} - -// getListObjectsV2BucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getListObjectsV2BucketMember(input interface{}) (*string, bool) { - in := input.(*ListObjectsV2Input) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListObjectsV2UpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListObjectsV2BucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go deleted file mode 100644 index cd2184077dd9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ /dev/null @@ -1,615 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// Lists the parts that have been uploaded for a specific multipart upload. -// -// To use this operation, you must provide the upload ID in the request. You -// obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload]. -// -// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of -// 1,000 parts is also the default value. You can restrict the number of parts in a -// response by specifying the max-parts request parameter. If your multipart -// upload consists of more than 1,000 parts, the response returns an IsTruncated -// field with the value of true , and a NextPartNumberMarker element. To list -// remaining uploaded parts, in subsequent ListParts requests, include the -// part-number-marker query string parameter and set its value to the -// NextPartNumberMarker field value from the previous response. -// -// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// If the upload was created using server-side encryption with Key Management -// -// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon -// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt -// action for the ListParts request to succeed. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to ListParts : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [GetObjectAttributes] -// -// [ListMultipartUploads] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { - if params == nil { - params = &ListPartsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListParts", params, optFns, c.addOperationListPartsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListPartsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListPartsInput struct { - - // The name of the bucket to which the parts are being uploaded. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Upload ID identifying the multipart upload whose parts are being listed. - // - // This member is required. - UploadId *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Sets the maximum number of parts to return. - MaxParts *int32 - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerAlgorithm *string - - // The server-side encryption (SSE) customer managed key. This parameter is needed - // only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // The MD5 server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type ListPartsOutput struct { - - // If the bucket has a lifecycle rule configured with an action to abort - // incomplete multipart uploads and the prefix in the lifecycle rule matches the - // object name in the request, then the response includes this header indicating - // when the initiated multipart upload will become eligible for abort operation. - // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. - // - // The response will also include the x-amz-abort-rule-id header that will provide - // the ID of the lifecycle configuration rule that defines this action. - // - // This functionality is not supported for directory buckets. - // - // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config - AbortDate *time.Time - - // This header is returned along with the x-amz-abort-date header. It identifies - // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - // - // This functionality is not supported for directory buckets. - AbortRuleId *string - - // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. - Bucket *string - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm types.ChecksumAlgorithm - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. You can use this header - // response to verify that the checksum type that is received is the same checksum - // type that was specified in CreateMultipartUpload request. For more information, - // see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Container element that identifies who initiated the multipart upload. If the - // initiator is an Amazon Web Services account, this element provides the same - // information as the Owner element. If the initiator is an IAM User, this element - // provides the user ARN and display name. - Initiator *types.Initiator - - // Indicates whether the returned list of parts is truncated. A true value - // indicates that the list was truncated. A list can be truncated if the number of - // parts exceeds the limit returned in the MaxParts element. - IsTruncated *bool - - // Object key for which the multipart upload was initiated. - Key *string - - // Maximum number of parts that were allowed in the response. - MaxParts *int32 - - // When a list is truncated, this element specifies the last part in the list, as - // well as the value to use for the part-number-marker request parameter in a - // subsequent request. - NextPartNumberMarker *string - - // Container element that identifies the object owner, after the object is - // created. If multipart upload is initiated by an IAM user, this element provides - // the parent account ID and display name. - // - // Directory buckets - The bucket owner is returned as the object owner for all - // the parts. - Owner *types.Owner - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *string - - // Container for elements related to a particular part. A response can contain - // zero or more Part elements. - Parts []types.Part - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // The class of storage used to store the uploaded object. - // - // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 - // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 - // One Zone-Infrequent Access storage class) in Dedicated Local Zones. - StorageClass types.StorageClass - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListParts{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListParts"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListPartsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListParts(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListPartsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListPartsPaginatorOptions is the paginator options for ListParts -type ListPartsPaginatorOptions struct { - // Sets the maximum number of parts to return. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListPartsPaginator is a paginator for ListParts -type ListPartsPaginator struct { - options ListPartsPaginatorOptions - client ListPartsAPIClient - params *ListPartsInput - nextToken *string - firstPage bool -} - -// NewListPartsPaginator returns a new ListPartsPaginator -func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, optFns ...func(*ListPartsPaginatorOptions)) *ListPartsPaginator { - if params == nil { - params = &ListPartsInput{} - } - - options := ListPartsPaginatorOptions{} - if params.MaxParts != nil { - options.Limit = *params.MaxParts - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListPartsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.PartNumberMarker, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListPartsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListParts page. -func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.PartNumberMarker = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxParts = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListParts(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = nil - if result.IsTruncated != nil && *result.IsTruncated { - p.nextToken = result.NextPartNumberMarker - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func (v *ListPartsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListPartsAPIClient is a client that implements the ListParts operation. -type ListPartsAPIClient interface { - ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) -} - -var _ ListPartsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListParts", - } -} - -// getListPartsBucketMember returns a pointer to string denoting a provided bucket -// member valueand a boolean indicating if the input has a modeled bucket name, -func getListPartsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListPartsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListPartsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListPartsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go deleted file mode 100644 index f8f349db7240..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ /dev/null @@ -1,335 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer -// Acceleration is a bucket-level feature that enables you to perform faster data -// transfers to Amazon S3. -// -// To use this operation, you must have permission to perform the -// s3:PutAccelerateConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// The Transfer Acceleration state of a bucket can be set to one of the following -// two values: -// -// - Enabled – Enables accelerated data transfers to the bucket. -// -// - Suspended – Disables accelerated data transfers to the bucket. -// -// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket. -// -// After setting the Transfer Acceleration state of a bucket to Enabled, it might -// take up to thirty minutes before the data transfer rates to the bucket increase. -// -// The name of the bucket used for Transfer Acceleration must be DNS-compliant and -// must not contain periods ("."). -// -// For more information about transfer acceleration, see [Transfer Acceleration]. -// -// The following operations are related to PutBucketAccelerateConfiguration : -// -// [GetBucketAccelerateConfiguration] -// -// [CreateBucket] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { - if params == nil { - params = &PutBucketAccelerateConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketAccelerateConfiguration", params, optFns, c.addOperationPutBucketAccelerateConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketAccelerateConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketAccelerateConfigurationInput struct { - - // Container for setting the transfer acceleration state. - // - // This member is required. - AccelerateConfiguration *types.AccelerateConfiguration - - // The name of the bucket for which the accelerate configuration is set. - // - // This member is required. - Bucket *string - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketAccelerateConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAccelerateConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketAccelerateConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketAccelerateConfiguration", - } -} - -// getPutBucketAccelerateConfigurationRequestAlgorithmMember gets the request -// checksum algorithm value provided as input. -func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketAccelerateConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketAccelerateConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketAccelerateConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketAccelerateConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go deleted file mode 100644 index 045fa12c0260..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go +++ /dev/null @@ -1,497 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue -// support for creating new Email Grantee Access Control Lists (ACL). Email Grantee -// ACLs created prior to this date will continue to work and remain accessible -// through the Amazon Web Services Management Console, Command Line Interface -// (CLI), SDKs, and REST API. However, you will no longer be able to create new -// Email Grantee ACLs. -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation is not supported for directory buckets. -// -// Sets the permissions on an existing bucket using access control lists (ACL). -// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the -// WRITE_ACP permission. -// -// You can use one of the following two ways to set a bucket's permissions: -// -// - Specify the ACL in the request body -// -// - Specify permissions using request headers -// -// You cannot specify access permission using both the body and the request -// headers. -// -// Depending on your application needs, you may choose to set the ACL on a bucket -// using either the request body or the headers. For example, if you have an -// existing application that updates a bucket ACL using the request body, then you -// can continue to use that approach. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// ACLs are disabled and no longer affect permissions. You must use policies to -// grant access to your bucket and the objects in it. Requests to set ACLs or -// update ACLs fail and return the AccessControlListNotSupported error code. -// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the -// Amazon S3 User Guide. -// -// Permissions You can set access permissions by using one of the following -// methods: -// -// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a -// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined -// set of grantees and permissions. Specify the canned ACL name as the value of -// x-amz-acl . If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see [Canned ACL]. -// -// - Specify access permissions explicitly with the x-amz-grant-read , -// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control -// headers. When using these headers, you specify explicit access permissions and -// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the -// permission. If you use these ACL-specific headers, you cannot use the -// x-amz-acl header to set a canned ACL. These parameters map to the set of -// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. -// -// You specify each grantee as a type=value pair, where the type is one of the -// -// following: -// -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// -// - uri – if you are granting permissions to a predefined group -// -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// For example, the following x-amz-grant-write header grants create, overwrite, -// -// and delete objects permission to LogDelivery group predefined by Amazon S3 and -// two Amazon Web Services accounts identified by their email addresses. -// -// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", -// -// id="111122223333", id="555566667777" -// -// You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. -// -// Grantee Values You can specify the person (grantee) to whom you're assigning -// access rights (using request elements) in the following ways. For examples of -// how to specify these grantee values in JSON format, see the Amazon Web Services -// CLI example in [Enabling Amazon S3 server access logging]in the Amazon S3 User Guide. -// -// - By the person's ID: -// -// <>ID<><>GranteesEmail<> -// -// DisplayName is optional and ignored in the request -// -// - By URI: -// -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// - By Email address: -// -// <>Grantees@email.com<>& -// -// The grantee is resolved to the CanonicalUser and, in a response to a GET Object -// -// acl request, appears as the CanonicalUser. -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// The following operations are related to PutBucketAcl : -// -// [CreateBucket] -// -// [DeleteBucket] -// -// [GetObjectAcl] -// -// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region -// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html -// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html -// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL -// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Enabling Amazon S3 server access logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html -func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { - if params == nil { - params = &PutBucketAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketAcl", params, optFns, c.addOperationPutBucketAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketAclInput struct { - - // The bucket to which to apply the ACL. - // - // This member is required. - Bucket *string - - // The canned ACL to apply to the bucket. - ACL types.BucketCannedACL - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *types.AccessControlPolicy - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to [RFC 1864.] - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string - - // Allows grantee to list the objects in the bucket. - GrantRead *string - - // Allows grantee to read the bucket ACL. - GrantReadACP *string - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions and - // overwrites of those objects. - GrantWrite *string - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string - - noSmithyDocumentSerde -} - -func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketAclOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketAcl", - } -} - -// getPutBucketAclRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. -func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketAclInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketAclRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketAclBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go deleted file mode 100644 index 76658c53c60c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ /dev/null @@ -1,321 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). You can have up to 1,000 analytics configurations per bucket. -// -// You can choose to have storage class analysis export analysis reports sent to a -// comma-separated values (CSV) flat file. See the DataExport request element. -// Reports are updated daily and are based on the object filters that you -// configure. When selecting data export, you specify a destination bucket and an -// optional destination prefix where the file is written. You can export the data -// to a destination bucket in a different account. However, the destination bucket -// must be in the same Region as the bucket that you are making the PUT analytics -// configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis]. -// -// You must create a bucket policy on the destination bucket where the exported -// file is written to grant permissions to Amazon S3 to write objects to the -// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. -// -// To use this operation, you must have permissions to perform the -// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// PutBucketAnalyticsConfiguration has the following special errors: -// -// - HTTP Error: HTTP 400 Bad Request -// -// - Code: InvalidArgument -// -// - Cause: Invalid argument. -// -// - HTTP Error: HTTP 400 Bad Request -// -// - Code: TooManyConfigurations -// -// - Cause: You are attempting to create a new configuration but have already -// reached the 1,000-configuration limit. -// -// - HTTP Error: HTTP 403 Forbidden -// -// - Code: AccessDenied -// -// - Cause: You are not the owner of the specified bucket, or you do not have -// the s3:PutAnalyticsConfiguration bucket permission to set the configuration on -// the bucket. -// -// The following operations are related to PutBucketAnalyticsConfiguration : -// -// [GetBucketAnalyticsConfiguration] -// -// [DeleteBucketAnalyticsConfiguration] -// -// [ListBucketAnalyticsConfigurations] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 -// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html -// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { - if params == nil { - params = &PutBucketAnalyticsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketAnalyticsConfiguration", params, optFns, c.addOperationPutBucketAnalyticsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketAnalyticsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketAnalyticsConfigurationInput struct { - - // The configuration and any analyses for the analytics filter. - // - // This member is required. - AnalyticsConfiguration *types.AnalyticsConfiguration - - // The name of the bucket to which an analytics configuration is stored. - // - // This member is required. - Bucket *string - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketAnalyticsConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAnalyticsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketAnalyticsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketAnalyticsConfiguration", - } -} - -// getPutBucketAnalyticsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketAnalyticsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketAnalyticsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go deleted file mode 100644 index d241ba8d37ca..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go +++ /dev/null @@ -1,360 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the cors configuration for your bucket. If the configuration exists, -// Amazon S3 replaces it. -// -// To use this operation, you must be allowed to perform the s3:PutBucketCORS -// action. By default, the bucket owner has this permission and can grant it to -// others. -// -// You set this configuration on a bucket so that the bucket can service -// cross-origin requests. For example, you might want to enable a request whose -// origin is http://www.example.com to access your Amazon S3 bucket at -// my.example.bucket.com by using the browser's XMLHttpRequest capability. -// -// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors -// subresource to the bucket. The cors subresource is an XML document in which you -// configure rules that identify origins and the HTTP methods that can be executed -// on your bucket. The document is limited to 64 KB in size. -// -// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS -// request) against a bucket, it evaluates the cors configuration on the bucket -// and uses the first CORSRule rule that matches the incoming browser request to -// enable a cross-origin request. For a rule to match, the following conditions -// must be met: -// -// - The request's Origin header must match AllowedOrigin elements. -// -// - The request method (for example, GET, PUT, HEAD, and so on) or the -// Access-Control-Request-Method header in case of a pre-flight OPTIONS request -// must be one of the AllowedMethod elements. -// -// - Every header specified in the Access-Control-Request-Headers request header -// of a pre-flight request must match an AllowedHeader element. -// -// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. -// -// The following operations are related to PutBucketCors : -// -// [GetBucketCors] -// -// [DeleteBucketCors] -// -// [RESTOPTIONSobject] -// -// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html -// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html -func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { - if params == nil { - params = &PutBucketCorsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketCors", params, optFns, c.addOperationPutBucketCorsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketCorsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketCorsInput struct { - - // Specifies the bucket impacted by the cors configuration. - // - // This member is required. - Bucket *string - - // Describes the cross-origin access configuration for objects in an Amazon S3 - // bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. - // - // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html - // - // This member is required. - CORSConfiguration *types.CORSConfiguration - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to [RFC 1864.] - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketCorsOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketCors{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketCors"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketCors(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketCorsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketCorsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketCors", - } -} - -// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketCorsInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketCorsRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketCorsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketCorsBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketCorsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketCorsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go deleted file mode 100644 index 197a4f184fea..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go +++ /dev/null @@ -1,437 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation configures default encryption and Amazon S3 Bucket Keys for an -// existing bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// By default, all buckets have a default encryption configuration that uses -// server-side encryption with Amazon S3 managed keys (SSE-S3). -// -// - General purpose buckets -// -// - You can optionally configure default encryption for a bucket by using -// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or -// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). -// If you specify default encryption by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. -// For information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the -// Amazon S3 User Guide. -// -// - If you use PutBucketEncryption to set your [default bucket encryption]to SSE-KMS, you should verify -// that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID -// provided in PutBucketEncryption requests. -// -// - Directory buckets - You can optionally configure default encryption for a -// bucket by using server-side encryption with Key Management Service (KMS) keys -// (SSE-KMS). -// -// - We recommend that the bucket's default encryption uses the desired -// encryption configuration and you don't override the bucket default encryption in -// your CreateSession requests or PUT object requests. Then, new objects are -// automatically encrypted with the desired encryption settings. For more -// information about the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads] -// . -// -// - Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket's -// lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. -// -// - S3 Bucket Keys are always enabled for GET and PUT operations in a directory -// bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy -// SSE-KMS encrypted objects from general purpose buckets to directory buckets, -// from directory buckets to general purpose buckets, or between directory buckets, -// through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a -// copy request is made for a KMS-encrypted object. -// -// - When you specify an [KMS customer managed key]for encryption in your directory bucket, only use the -// key ID or key ARN. The key alias format of the KMS key isn't supported. -// -// - For directory buckets, if you use PutBucketEncryption to set your [default bucket encryption]to -// SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption -// requests. -// -// If you're specifying a customer managed KMS key, we recommend using a fully -// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the -// key within the requester’s account. This behavior can result in data that's -// encrypted with a KMS key that belongs to the requester, and not the bucket -// owner. -// -// Also, this action requires Amazon Web Services Signature Version 4. For more -// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)]. -// -// Permissions -// -// - General purpose bucket permissions - The s3:PutEncryptionConfiguration -// permission is required in a policy. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:PutEncryptionConfiguration permission in an IAM -// identity-based policy instead of a bucket policy. Cross-account access to this -// API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// To set a directory bucket default encryption with SSE-KMS, you must also have -// -// the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based -// policies and KMS key policies for the target KMS key. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to PutBucketEncryption : -// -// [GetBucketEncryption] -// -// [DeleteBucketEncryption] -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk -// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html -// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html -// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html -// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job -// [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { - if params == nil { - params = &PutBucketEncryptionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketEncryption", params, optFns, c.addOperationPutBucketEncryptionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketEncryptionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketEncryptionInput struct { - - // Specifies default encryption for a bucket using server-side encryption with - // different key options. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // Specifies the default server-side-encryption configuration. - // - // This member is required. - ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the server-side encryption - // configuration. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // This functionality is not supported for directory buckets. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketEncryptionOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketEncryption{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketEncryption"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketEncryption(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketEncryptionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketEncryptionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketEncryption", - } -} - -// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketEncryptionInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketEncryptionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketEncryptionBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketEncryptionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketEncryptionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go deleted file mode 100644 index 5a7b8989bb51..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ /dev/null @@ -1,313 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can -// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// Operations related to PutBucketIntelligentTieringConfiguration include: -// -// [DeleteBucketIntelligentTieringConfiguration] -// -// [GetBucketIntelligentTieringConfiguration] -// -// [ListBucketIntelligentTieringConfigurations] -// -// You only need S3 Intelligent-Tiering enabled on a bucket if you want to -// automatically move objects stored in the S3 Intelligent-Tiering storage class to -// the Archive Access or Deep Archive Access tier. -// -// PutBucketIntelligentTieringConfiguration has the following special errors: -// -// HTTP 400 Bad Request Error Code: InvalidArgument -// -// Cause: Invalid Argument -// -// HTTP 400 Bad Request Error Code: TooManyConfigurations -// -// Cause: You are attempting to create a new configuration but have already -// reached the 1,000-configuration limit. -// -// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, -// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission -// to set the configuration on the bucket. -// -// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html -// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html -func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { - if params == nil { - params = &PutBucketIntelligentTieringConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketIntelligentTieringConfiguration", params, optFns, c.addOperationPutBucketIntelligentTieringConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketIntelligentTieringConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketIntelligentTieringConfigurationInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - // Container for S3 Intelligent-Tiering configuration. - // - // This member is required. - IntelligentTieringConfiguration *types.IntelligentTieringConfiguration - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketIntelligentTieringConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketIntelligentTieringConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketIntelligentTieringConfiguration", - } -} - -// getPutBucketIntelligentTieringConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getPutBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketIntelligentTieringConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketIntelligentTieringConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go deleted file mode 100644 index 162ac031abe4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go +++ /dev/null @@ -1,330 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This implementation of the PUT action adds an S3 Inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. -// -// Amazon S3 inventory generates inventories of the objects in the bucket on a -// daily or weekly basis, and the results are published to a flat file. The bucket -// that is inventoried is called the source bucket, and the bucket where the -// inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same Amazon Web Services Region as the source bucket. -// -// When you configure an inventory for a source bucket, you specify the -// destination bucket where you want the inventory to be stored, and whether to -// generate the inventory daily or weekly. You can also configure what object -// metadata to include and whether to inventory all object versions or only current -// versions. For more information, see [Amazon S3 Inventory]in the Amazon S3 User Guide. -// -// You must create a bucket policy on the destination bucket to grant permissions -// to Amazon S3 to write objects to the bucket in the defined location. For an -// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. -// -// Permissions To use this operation, you must have permission to perform the -// s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. -// -// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report -// that includes all object metadata fields available and to specify the -// destination bucket to store the inventory. A user with read access to objects in -// the destination bucket can also access all object metadata fields that are -// available in the inventory report. -// -// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide. -// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists] -// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations]and [Identity and access management in Amazon S3] -// in the Amazon S3 User Guide. -// -// PutBucketInventoryConfiguration has the following special errors: -// -// HTTP 400 Bad Request Error Code: InvalidArgument -// -// Cause: Invalid Argument -// -// HTTP 400 Bad Request Error Code: TooManyConfigurations -// -// Cause: You are attempting to create a new configuration but have already -// reached the 1,000-configuration limit. -// -// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, -// or you do not have the s3:PutInventoryConfiguration bucket permission to set -// the configuration on the bucket. -// -// The following operations are related to PutBucketInventoryConfiguration : -// -// [GetBucketInventoryConfiguration] -// -// [DeleteBucketInventoryConfiguration] -// -// [ListBucketInventoryConfigurations] -// -// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html -// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html -// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html -// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 -// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents -// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html -func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { - if params == nil { - params = &PutBucketInventoryConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketInventoryConfiguration", params, optFns, c.addOperationPutBucketInventoryConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketInventoryConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketInventoryConfigurationInput struct { - - // The name of the bucket where the inventory configuration will be stored. - // - // This member is required. - Bucket *string - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // Specifies the inventory configuration. - // - // This member is required. - InventoryConfiguration *types.InventoryConfiguration - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketInventoryConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketInventoryConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketInventoryConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketInventoryConfiguration", - } -} - -// getPutBucketInventoryConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketInventoryConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketInventoryConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go deleted file mode 100644 index df972372105c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ /dev/null @@ -1,446 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. Keep in mind that this will overwrite an existing -// lifecycle configuration, so if you want to retain any configuration details, -// they must be included in the new lifecycle configuration. For information about -// lifecycle configuration, see [Managing your storage lifecycle]. -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, object size, or any -// combination of these. Accordingly, this section describes the latest API. The -// previous version of the API supported filtering based only on an object key name -// prefix, which is supported for backward compatibility. For the related API -// description, see [PutBucketLifecycle]. -// -// Rules Permissions HTTP Host header syntax You specify the lifecycle -// configuration in your request body. The lifecycle configuration is specified as -// XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can -// have up to 1,000 rules. This limit is not adjustable. -// -// Bucket lifecycle configuration supports specifying a lifecycle rule using an -// object key name prefix, one or more object tags, object size, or any combination -// of these. Accordingly, this section describes the latest API. The previous -// version of the API supported filtering based only on an object key name prefix, -// which is supported for backward compatibility for general purpose buckets. For -// the related API description, see [PutBucketLifecycle]. -// -// Lifecyle configurations for directory buckets only support expiring objects and -// cancelling multipart uploads. Expiring of versioned objects,transitions and tag -// filters are not supported. -// -// A lifecycle rule consists of the following: -// -// - A filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, object size, or any -// combination of these. -// -// - A status indicating whether the rule is in effect. -// -// - One or more lifecycle transition and expiration actions that you want -// Amazon S3 to perform on the objects identified by the filter. If the state of -// your bucket is versioning-enabled or versioning-suspended, you can have many -// versions of the same object (one current version and zero or more noncurrent -// versions). Amazon S3 provides predefined actions that you can specify for -// current and noncurrent object versions. -// -// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements]. -// -// - General purpose bucket permissions - By default, all Amazon S3 resources -// are private, including buckets, objects, and related subresources (for example, -// lifecycle configuration and website configuration). Only the resource owner -// (that is, the Amazon Web Services account that created it) can access the -// resource. The resource owner can optionally grant access permissions to others -// by writing an access policy. For this operation, a user must have the -// s3:PutLifecycleConfiguration permission. -// -// You can also explicitly deny permissions. An explicit deny also supersedes any -// -// other permissions. If you want to block users or accounts from removing or -// deleting objects from your bucket, you must deny them permissions for the -// following actions: -// -// - s3:DeleteObject -// -// - s3:DeleteObjectVersion -// -// - s3:PutLifecycleConfiguration -// -// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - You must have the -// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy -// to use this operation. Cross-account access to this API operation isn't -// supported. The resource owner can optionally grant access permissions to others -// by creating a role or user for them as long as they are within the same account -// as the owner and resource. -// -// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in -// -// the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name -// . Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// The following operations are related to PutBucketLifecycleConfiguration : -// -// [GetBucketLifecycleConfiguration] -// -// [DeleteBucketLifecycle] -// -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html -// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html -// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html -// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { - if params == nil { - params = &PutBucketLifecycleConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketLifecycleConfiguration", params, optFns, c.addOperationPutBucketLifecycleConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketLifecycleConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketLifecycleConfigurationInput struct { - - // The name of the bucket for which to set the configuration. - // - // This member is required. - Bucket *string - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - ExpectedBucketOwner *string - - // Container for lifecycle rules. You can add as many as 1,000 rules. - LifecycleConfiguration *types.BucketLifecycleConfiguration - - // Indicates which default minimum object size behavior is applied to the - // lifecycle configuration. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // - all_storage_classes_128K - Objects smaller than 128 KB will not transition - // to any storage class by default. - // - // - varies_by_storage_class - Objects smaller than 128 KB will transition to - // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, - // all other storage classes will prevent transitions smaller than 128 KB. - // - // To customize the minimum object size for any transition you can add a filter - // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body - // of your transition rule. Custom filters always take precedence over the default - // transition behavior. - TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize - - noSmithyDocumentSerde -} - -func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketLifecycleConfigurationOutput struct { - - // Indicates which default minimum object size behavior is applied to the - // lifecycle configuration. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // - all_storage_classes_128K - Objects smaller than 128 KB will not transition - // to any storage class by default. - // - // - varies_by_storage_class - Objects smaller than 128 KB will transition to - // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, - // all other storage classes will prevent transitions smaller than 128 KB. - // - // To customize the minimum object size for any transition you can add a filter - // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body - // of your transition rule. Custom filters always take precedence over the default - // transition behavior. - TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLifecycleConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketLifecycleConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketLifecycleConfiguration", - } -} - -// getPutBucketLifecycleConfigurationRequestAlgorithmMember gets the request -// checksum algorithm value provided as input. -func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketLifecycleConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketLifecycleConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketLifecycleConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketLifecycleConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go deleted file mode 100644 index 060341128568..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go +++ /dev/null @@ -1,382 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue -// support for creating new Email Grantee Access Control Lists (ACL). Email Grantee -// ACLs created prior to this date will continue to work and remain accessible -// through the Amazon Web Services Management Console, Command Line Interface -// (CLI), SDKs, and REST API. However, you will no longer be able to create new -// Email Grantee ACLs. -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// This operation is not supported for directory buckets. -// -// Set the logging parameters for a bucket and to specify permissions for who can -// view and modify the logging parameters. All logs are saved to buckets in the -// same Amazon Web Services Region as the source bucket. To set the logging status -// of a bucket, you must be the bucket owner. -// -// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the -// Grantee request element to grant access to other people. The Permissions -// request element specifies the kind of access the grantee has to the logs. -// -// If the target bucket for log delivery uses the bucket owner enforced setting -// for S3 Object Ownership, you can't use the Grantee request element to grant -// access to others. Permissions can only be granted using policies. For more -// information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. -// -// Grantee Values You can specify the person (grantee) to whom you're assigning -// access rights (by using request elements) in the following ways. For examples of -// how to specify these grantee values in JSON format, see the Amazon Web Services -// CLI example in [Enabling Amazon S3 server access logging]in the Amazon S3 User Guide. -// -// - By the person's ID: -// -// <>ID<><>GranteesEmail<> -// -// DisplayName is optional and ignored in the request. -// -// - By Email address: -// -// <>Grantees@email.com<> -// -// The grantee is resolved to the CanonicalUser and, in a response to a -// -// GETObjectAcl request, appears as the CanonicalUser. -// -// - By URI: -// -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// To enable logging, you use LoggingEnabled and its children request elements. To -// disable logging, you use an empty BucketLoggingStatus request element: -// -// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User -// Guide. -// -// For more information about creating a bucket, see [CreateBucket]. For more information about -// returning the logging status of a bucket, see [GetBucketLogging]. -// -// The following operations are related to PutBucketLogging : -// -// [PutObject] -// -// [DeleteBucket] -// -// [CreateBucket] -// -// [GetBucketLogging] -// -// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Enabling Amazon S3 server access logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html -// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html -func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { - if params == nil { - params = &PutBucketLoggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketLogging", params, optFns, c.addOperationPutBucketLoggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketLoggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketLoggingInput struct { - - // The name of the bucket for which to set the logging parameters. - // - // This member is required. - Bucket *string - - // Container for logging status information. - // - // This member is required. - BucketLoggingStatus *types.BucketLoggingStatus - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash of the PutBucketLogging request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketLoggingOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLogging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLogging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLogging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketLoggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketLoggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketLogging", - } -} - -// getPutBucketLoggingRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketLoggingInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketLoggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketLoggingBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketLoggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketLoggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go deleted file mode 100644 index c665b2efec83..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go +++ /dev/null @@ -1,299 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets a metrics configuration (specified by the metrics configuration ID) for -// the bucket. You can have up to 1,000 metrics configurations per bucket. If -// you're updating an existing metrics configuration, note that this is a full -// replacement of the existing metrics configuration. If you don't include the -// elements you want to keep, they are erased. -// -// To use this operation, you must have permissions to perform the -// s3:PutMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. -// -// The following operations are related to PutBucketMetricsConfiguration : -// -// [DeleteBucketMetricsConfiguration] -// -// [GetBucketMetricsConfiguration] -// -// [ListBucketMetricsConfigurations] -// -// PutBucketMetricsConfiguration has the following special error: -// -// - Error code: TooManyConfigurations -// -// - Description: You are attempting to create a new configuration but have -// already reached the 1,000-configuration limit. -// -// - HTTP Status Code: HTTP 400 Bad Request -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html -// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html -// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { - if params == nil { - params = &PutBucketMetricsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketMetricsConfiguration", params, optFns, c.addOperationPutBucketMetricsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketMetricsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketMetricsConfigurationInput struct { - - // The name of the bucket for which the metrics configuration is set. - // - // This member is required. - Bucket *string - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // Specifies the metrics configuration. - // - // This member is required. - MetricsConfiguration *types.MetricsConfiguration - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketMetricsConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketMetricsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketMetricsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketMetricsConfiguration", - } -} - -// getPutBucketMetricsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketMetricsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketMetricsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go deleted file mode 100644 index 6ec91374fc5e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go +++ /dev/null @@ -1,313 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Enables notifications of specified events for a bucket. For more information -// about event notifications, see [Configuring Event Notifications]. -// -// Using this API, you can replace an existing notification configuration. The -// configuration is an XML file that defines the event types that you want Amazon -// S3 to publish and the destination where you want Amazon S3 to publish an event -// notification when it detects an event of the specified type. -// -// By default, your bucket has no event notifications configured. That is, the -// notification configuration will be an empty NotificationConfiguration . -// -// This action replaces the existing notification configuration with the -// configuration you include in the request body. -// -// After Amazon S3 receives this request, it first verifies that any Amazon Simple -// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) -// destination exists, and that the bucket owner has permission to publish to it by -// sending a test notification. In the case of Lambda destinations, Amazon S3 -// verifies that the Lambda function permissions grant Amazon S3 permission to -// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events]. -// -// You can disable notifications by adding the empty NotificationConfiguration -// element. -// -// For more information about the number of event notification configurations that -// you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference. -// -// By default, only the bucket owner can configure notifications on a bucket. -// However, bucket owners can use a bucket policy to grant permission to other -// users to set this configuration with the required s3:PutBucketNotification -// permission. -// -// The PUT notification is an atomic operation. For example, suppose your -// notification configuration includes SNS topic, SQS queue, and Lambda function -// configurations. When you send a PUT request with this configuration, Amazon S3 -// sends test messages to your SNS topic. If the message fails, the entire PUT -// action will fail, and Amazon S3 will not add the configuration to your bucket. -// -// If the configuration in the request body includes only one TopicConfiguration -// specifying only the s3:ReducedRedundancyLostObject event type, the response -// will also include the x-amz-sns-test-message-id header containing the message -// ID of the test notification sent to the topic. -// -// The following action is related to PutBucketNotificationConfiguration : -// -// [GetBucketNotificationConfiguration] -// -// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3 -// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html -// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { - if params == nil { - params = &PutBucketNotificationConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketNotificationConfiguration", params, optFns, c.addOperationPutBucketNotificationConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketNotificationConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketNotificationConfigurationInput struct { - - // The name of the bucket. - // - // This member is required. - Bucket *string - - // A container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off for the bucket. - // - // This member is required. - NotificationConfiguration *types.NotificationConfiguration - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or - // false value. - SkipDestinationValidation *bool - - noSmithyDocumentSerde -} - -func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketNotificationConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketNotificationConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketNotificationConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketNotificationConfiguration", - } -} - -// getPutBucketNotificationConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketNotificationConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketNotificationConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go deleted file mode 100644 index e84f53627c35..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go +++ /dev/null @@ -1,322 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:PutBucketOwnershipControls permission. For more -// information about Amazon S3 permissions, see [Specifying permissions in a policy]. -// -// For information about Amazon S3 Object Ownership, see [Using object ownership]. -// -// The following operations are related to PutBucketOwnershipControls : -// -// # GetBucketOwnershipControls -// -// # DeleteBucketOwnershipControls -// -// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html -// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html -func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { - if params == nil { - params = &PutBucketOwnershipControlsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketOwnershipControls", params, optFns, c.addOperationPutBucketOwnershipControlsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketOwnershipControlsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketOwnershipControlsInput struct { - - // The name of the Amazon S3 bucket whose OwnershipControls you want to set. - // - // This member is required. - Bucket *string - - // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or - // ObjectWriter) that you want to apply to this Amazon S3 bucket. - // - // This member is required. - OwnershipControls *types.OwnershipControls - - // Indicates the algorithm used to create the checksum for the object when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum-algorithm header sent. Otherwise, Amazon S3 fails the request - // with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] in the - // Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash of the OwnershipControls request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketOwnershipControlsOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketOwnershipControls"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketOwnershipControls(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketOwnershipControlsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketOwnershipControls", - } -} - -// getPutBucketOwnershipControlsRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketOwnershipControlsRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketOwnershipControlsInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketOwnershipControlsRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketOwnershipControlsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketOwnershipControlsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go deleted file mode 100644 index 6c326792695a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go +++ /dev/null @@ -1,410 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions If you are using an identity other than the root user of the Amazon -// Web Services account that owns the bucket, the calling identity must both have -// the PutBucketPolicy permissions on the specified bucket and belong to the -// bucket owner's account in order to use this operation. -// -// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. -// -// To ensure that bucket owners don't inadvertently lock themselves out of their -// own buckets, the root principal in a bucket owner's Amazon Web Services account -// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API -// actions, even if their bucket policy explicitly denies the root principal's -// access. Bucket owner root principals can only be blocked from performing these -// API actions by VPC endpoint policies and Amazon Web Services Organizations -// policies. -// -// - General purpose bucket permissions - The s3:PutBucketPolicy permission is -// required in a policy. For more information about general purpose buckets bucket -// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:PutBucketPolicy permission in an IAM identity-based -// policy instead of a bucket policy. Cross-account access to this API operation -// isn't supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] -// in the Amazon S3 User Guide. -// -// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to PutBucketPolicy : -// -// [CreateBucket] -// -// [DeleteBucket] -// -// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { - if params == nil { - params = &PutBucketPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketPolicy", params, optFns, c.addOperationPutBucketPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketPolicyInput struct { - - // The name of the bucket. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The bucket policy as a JSON document. - // - // For directory buckets, the only IAM action supported in the bucket policy is - // s3express:CreateSession . - // - // This member is required. - Policy *string - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . - // - // For the x-amz-checksum-algorithm header, replace algorithm with the - // supported algorithm from the following list: - // - // - CRC32 - // - // - CRC32C - // - // - CRC64NVME - // - // - SHA1 - // - // - SHA256 - // - // For more information, see [Checking object integrity] in the Amazon S3 User Guide. - // - // If the individual checksum value you provide through x-amz-checksum-algorithm - // doesn't match the checksum algorithm you set through - // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest - // error. - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. - // - // This functionality is not supported for directory buckets. - ConfirmRemoveSelfBucketAccess *bool - - // The MD5 hash of the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // This functionality is not supported for directory buckets. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketPolicyOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketPolicyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketPolicyInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketPolicy", - } -} - -// getPutBucketPolicyRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketPolicyInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketPolicyRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketPolicyBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketPolicyBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketPolicyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketPolicyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go deleted file mode 100644 index 0ce30ce23605..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ /dev/null @@ -1,378 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Creates a replication configuration or replaces an existing one. For more -// information, see [Replication]in the Amazon S3 User Guide. -// -// Specify the replication configuration in the request body. In the replication -// configuration, you provide the name of the destination bucket or buckets where -// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume -// to replicate objects on your behalf, and other relevant information. You can -// invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion] -// aws:RequestedRegion condition key. -// -// A replication configuration must include at least one rule, and can contain a -// maximum of 1,000. Each rule identifies a subset of objects to replicate by -// filtering the objects in the source bucket. To choose additional subsets of -// objects to replicate, add a rule for each subset. -// -// To specify a subset of the objects in the source bucket to apply a replication -// rule to, add the Filter element as a child of the Rule element. You can filter -// objects based on an object key prefix, one or more object tags, or both. When -// you add the Filter element in the configuration, you must also add the following -// elements: DeleteMarkerReplication , Status , and Priority . -// -// If you are using an earlier version of the replication configuration, Amazon S3 -// handles replication of delete markers differently. For more information, see [Backward Compatibility]. -// -// For information about enabling versioning on a bucket, see [Using Versioning]. -// -// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't -// replicate objects that are stored at rest using server-side encryption with KMS -// keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: -// SourceSelectionCriteria , SseKmsEncryptedObjects , Status , -// EncryptionConfiguration , and ReplicaKmsKeyID . For information about -// replication configuration, see [Replicating Objects Created with SSE Using KMS keys]. -// -// For information on PutBucketReplication errors, see [List of replication-related error codes] -// -// Permissions To create a PutBucketReplication request, you must have -// s3:PutReplicationConfiguration permissions for the bucket. -// -// By default, a resource owner, in this case the Amazon Web Services account that -// created the bucket, can perform this operation. The resource owner can also -// grant others permissions to perform the operation. For more information about -// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// To perform this operation, the user or role performing the action must have the [iam:PassRole] -// permission. -// -// The following operations are related to PutBucketReplication : -// -// [GetBucketReplication] -// -// [DeleteBucketReplication] -// -// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html -// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html -// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion -// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html -// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html -// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html -// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList -// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations -// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { - if params == nil { - params = &PutBucketReplicationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketReplication", params, optFns, c.addOperationPutBucketReplicationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketReplicationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketReplicationInput struct { - - // The name of the bucket - // - // This member is required. - Bucket *string - - // A container for replication rules. You can add up to 1,000 rules. The maximum - // size of a replication configuration is 2 MB. - // - // This member is required. - ReplicationConfiguration *types.ReplicationConfiguration - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // A token to allow Object Lock to be enabled for an existing bucket. - Token *string - - noSmithyDocumentSerde -} - -func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketReplicationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketReplication{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketReplication"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketReplication(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketReplicationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketReplicationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketReplication", - } -} - -// getPutBucketReplicationRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketReplicationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketReplicationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketReplicationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketReplicationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketReplicationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go deleted file mode 100644 index e2f172eb1864..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go +++ /dev/null @@ -1,325 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download will -// be charged for the download. For more information, see [Requester Pays Buckets]. -// -// The following operations are related to PutBucketRequestPayment : -// -// [CreateBucket] -// -// [GetBucketRequestPayment] -// -// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html -// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { - if params == nil { - params = &PutBucketRequestPaymentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketRequestPayment", params, optFns, c.addOperationPutBucketRequestPaymentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketRequestPaymentOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketRequestPaymentInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for Payer. - // - // This member is required. - RequestPaymentConfiguration *types.RequestPaymentConfiguration - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketRequestPaymentOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketRequestPayment"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketRequestPayment(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketRequestPaymentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketRequestPayment", - } -} - -// getPutBucketRequestPaymentRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketRequestPaymentInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketRequestPaymentRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketRequestPaymentBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketRequestPaymentBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketRequestPaymentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketRequestPaymentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go deleted file mode 100644 index a5a58186a184..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ /dev/null @@ -1,357 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the tags for a bucket. -// -// Use tags to organize your Amazon Web Services bill to reflect your own cost -// structure. To do this, sign up to get your Amazon Web Services account bill with -// tag key values included. Then, to see the cost of combined resources, organize -// your billing information according to resources with the same tag key values. -// For example, you can tag several resources with a specific application name, and -// then organize your billing information to see the total cost of that application -// across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags]. -// -// When this operation sets the tags for a bucket, it will overwrite any current -// tags the bucket already has. You cannot use this operation to add tags to an -// existing list of tags. -// -// To use this operation, you must have permissions to perform the -// s3:PutBucketTagging action. The bucket owner has this permission by default and -// can grant this permission to others. For more information about permissions, see -// [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// PutBucketTagging has the following special errors. For more Amazon S3 errors -// see, [Error Responses]. -// -// - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags]. -// -// - MalformedXML - The XML provided does not match the schema. -// -// - OperationAborted - A conflicting conditional action is currently in progress -// against this resource. Please try again. -// -// - InternalError - The service was unable to apply the provided tag to the -// bucket. -// -// The following operations are related to PutBucketTagging : -// -// [GetBucketTagging] -// -// [DeleteBucketTagging] -// -// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html -// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html -// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { - if params == nil { - params = &PutBucketTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketTagging", params, optFns, c.addOperationPutBucketTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketTaggingInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for the TagSet and Tag elements. - // - // This member is required. - Tagging *types.Tagging - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketTaggingOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketTagging", - } -} - -// getPutBucketTaggingRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketTaggingInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketTaggingRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go deleted file mode 100644 index c30435638f7b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// When you enable versioning on a bucket for the first time, it might take a -// short amount of time for the change to be fully propagated. While this change is -// propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for -// requests to objects created or updated after enabling versioning. We recommend -// that you wait for 15 minutes after enabling versioning before issuing write -// operations ( PUT or DELETE ) on objects in the bucket. -// -// Sets the versioning state of an existing bucket. -// -// You can set the versioning state with one of the following values: -// -// Enabled—Enables versioning for the objects in the bucket. All objects added to -// the bucket receive a unique version ID. -// -// Suspended—Disables versioning for the objects in the bucket. All objects added -// to the bucket receive the version ID null. -// -// If the versioning state has never been set on a bucket, it has no versioning -// state; a [GetBucketVersioning]request does not return a versioning state value. -// -// In order to enable MFA Delete, you must be the bucket owner. If you are the -// bucket owner and want to enable MFA Delete in the bucket versioning -// configuration, you must include the x-amz-mfa request header and the Status and -// the MfaDelete request elements in a request to set the versioning state of the -// bucket. -// -// If you have an object expiration lifecycle configuration in your non-versioned -// bucket and you want to maintain the same permanent delete behavior when you -// enable versioning, you must add a noncurrent expiration policy. The noncurrent -// expiration lifecycle configuration will manage the deletes of the noncurrent -// object versions in the version-enabled bucket. (A version-enabled bucket -// maintains one current and zero or more noncurrent object versions.) For more -// information, see [Lifecycle and Versioning]. -// -// The following operations are related to PutBucketVersioning : -// -// [CreateBucket] -// -// [DeleteBucket] -// -// [GetBucketVersioning] -// -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config -// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html -func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { - if params == nil { - params = &PutBucketVersioningInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketVersioning", params, optFns, c.addOperationPutBucketVersioningMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketVersioningOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketVersioningInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for setting the versioning state. - // - // This member is required. - VersioningConfiguration *types.VersioningConfiguration - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // >The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The concatenation of the authentication device's serial number, a space, and - // the value that is displayed on your authentication device. - MFA *string - - noSmithyDocumentSerde -} - -func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketVersioningOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketVersioning{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketVersioning"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketVersioning(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketVersioningUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketVersioningInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketVersioning", - } -} - -// getPutBucketVersioningRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketVersioningInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketVersioningRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketVersioningBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketVersioningBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketVersioningInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketVersioningBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go deleted file mode 100644 index 9c8222ba8a47..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ /dev/null @@ -1,380 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the configuration of the website that is specified in the website -// subresource. To configure a bucket as a website, you can add this subresource on -// the bucket with website configuration information such as the file name of the -// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3]. -// -// This PUT action requires the S3:PutBucketWebsite permission. By default, only -// the bucket owner can configure the website attached to a bucket; however, bucket -// owners can allow other users to set the website configuration by writing a -// bucket policy that grants them the S3:PutBucketWebsite permission. -// -// To redirect all website requests sent to the bucket's website endpoint, you add -// a website configuration with the following elements. Because all requests are -// sent to another website, you don't need to provide index document name for the -// bucket. -// -// - WebsiteConfiguration -// -// - RedirectAllRequestsTo -// -// - HostName -// -// - Protocol -// -// If you want granular control over redirects, you can use the following elements -// to add routing rules that describe conditions for redirecting requests and -// information about the redirect destination. In this case, the website -// configuration must provide an index document for the bucket, because some -// requests might not be redirected. -// -// - WebsiteConfiguration -// -// - IndexDocument -// -// - Suffix -// -// - ErrorDocument -// -// - Key -// -// - RoutingRules -// -// - RoutingRule -// -// - Condition -// -// - HttpErrorCodeReturnedEquals -// -// - KeyPrefixEquals -// -// - Redirect -// -// - Protocol -// -// - HostName -// -// - ReplaceKeyPrefixWith -// -// - ReplaceKeyWith -// -// - HttpRedirectCode -// -// Amazon S3 has a limitation of 50 routing rules per website configuration. If -// you require more than 50 routing rules, you can use object redirect. For more -// information, see [Configuring an Object Redirect]in the Amazon S3 User Guide. -// -// The maximum request length is limited to 128 KB. -// -// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html -// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html -func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { - if params == nil { - params = &PutBucketWebsiteInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, c.addOperationPutBucketWebsiteMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketWebsiteOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketWebsiteInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for the request. - // - // This member is required. - WebsiteConfiguration *types.WebsiteConfiguration - - // Indicates the algorithm used to create the checksum for the request when you - // use the SDK. This header will not provide any additional functionality if you - // don't use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketWebsiteOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketWebsite"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketWebsiteInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketWebsite", - } -} - -// getPutBucketWebsiteRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketWebsiteInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketWebsiteRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketWebsiteInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketWebsiteBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go deleted file mode 100644 index 774134be499e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ /dev/null @@ -1,1094 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "time" -) - -// End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue -// support for creating new Email Grantee Access Control Lists (ACL). Email Grantee -// ACLs created prior to this date will continue to work and remain accessible -// through the Amazon Web Services Management Console, Command Line Interface -// (CLI), SDKs, and REST API. However, you will no longer be able to create new -// Email Grantee ACLs. -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// Adds an object to a bucket. -// -// - Amazon S3 never adds partial objects; if you receive a success response, -// Amazon S3 added the entire object to the bucket. You cannot use PutObject to -// only update a single piece of metadata for an existing object. You must put the -// entire object with updated metadata if you want to update some values. -// -// - If your bucket uses the bucket owner enforced setting for Object Ownership, -// ACLs are disabled and no longer affect permissions. All objects written to the -// bucket by any account will be owned by the bucket owner. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Amazon S3 is a distributed system. If it receives multiple write requests for -// the same object simultaneously, it overwrites all but the last object written. -// However, Amazon S3 provides features that can modify this behavior: -// -// - S3 Object Lock - To prevent objects from being deleted or overwritten, you -// can use [Amazon S3 Object Lock]in the Amazon S3 User Guide. -// -// This functionality is not supported for directory buckets. -// -// - If-None-Match - Uploads the object only if the object key name does not -// already exist in the specified bucket. Otherwise, Amazon S3 returns a 412 -// Precondition Failed error. If a conflicting operation occurs during the -// upload, S3 returns a 409 ConditionalRequestConflict response. On a 409 -// failure, retry the upload. -// -// Expects the * character (asterisk). -// -// For more information, see [Add preconditions to S3 operations with conditional requests]in the Amazon S3 User Guide or [RFC 7232]. -// -// This functionality is not supported for S3 on Outposts. -// -// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 -// receives multiple write requests for the same object simultaneously, it stores -// all versions of the objects. For each write request that is made to the same -// object, Amazon S3 automatically generates a unique version ID of that object -// being stored in Amazon S3. You can retrieve, replace, or delete any version of -// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User -// Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning] -// . -// -// This functionality is not supported for directory buckets. -// -// Permissions -// -// - General purpose bucket permissions - The following permissions are required -// in your policies when your PutObject request includes specific headers. -// -// - s3:PutObject - To successfully complete the PutObject request, you must -// always have the s3:PutObject permission on a bucket to add an object to it. -// -// - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject -// request, you must have the s3:PutObjectAcl . -// -// - s3:PutObjectTagging - To successfully set the tag-set with your PutObject -// request, you must have the s3:PutObjectTagging . -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Data integrity with Content-MD5 -// -// - General purpose bucket - To ensure that data is not corrupted traversing -// the network, use the Content-MD5 header. When you use this header, Amazon S3 -// checks the object against the provided MD5 value and, if they do not match, -// Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 -// digest, you can calculate the MD5 while putting the object to Amazon S3 and -// compare the returned ETag to the calculated MD5 value. -// -// - Directory bucket - This functionality is not supported for directory -// buckets. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// For more information about related Amazon S3 APIs, see the following: -// -// [CopyObject] -// -// [DeleteObject] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html -// [Add preconditions to S3 operations with conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [RFC 7232]: https://datatracker.ietf.org/doc/rfc7232/ -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html -func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { - if params == nil { - params = &PutObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObject", params, optFns, c.addOperationPutObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectInput struct { - - // The bucket name to which the PUT action was initiated. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the PUT action was initiated. - // - // This member is required. - Key *string - - // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon - // S3 User Guide. - // - // When adding a new object, you can use headers to grant ACL-based permissions to - // individual Amazon Web Services accounts or to predefined groups defined by - // Amazon S3. These permissions are then added to the ACL on the object. By - // default, all objects are private. Only the owner has full access control. For - // more information, see [Access Control List (ACL) Overview]and [Managing ACLs Using the REST API] in the Amazon S3 User Guide. - // - // If the bucket that you're uploading objects to uses the bucket owner enforced - // setting for S3 Object Ownership, ACLs are disabled and no longer affect - // permissions. Buckets that use this setting only accept PUT requests that don't - // specify an ACL or PUT requests that specify bucket owner full control ACLs, such - // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL - // expressed in the XML format. PUT requests that contain other ACLs (for example, - // custom grants to certain Amazon Web Services accounts) fail and return a 400 - // error with the error code AccessControlListNotSupported . For more information, - // see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 User Guide. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL - // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - ACL types.ObjectCannedACL - - // Object data. - Body io.Reader - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // - // General purpose buckets - Setting this header to true causes Amazon S3 to use - // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this - // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. - // - // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT - // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't - // supported, when you copy SSE-KMS encrypted objects from general purpose buckets - // to directory buckets, from directory buckets to general purpose buckets, or - // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a - // call to KMS every time a copy request is made for a KMS-encrypted object. - // - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - // Can be used to specify caching behavior along the request/reply chain. For more - // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]. - // - // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - CacheControl *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . - // - // For the x-amz-checksum-algorithm header, replace algorithm with the - // supported algorithm from the following list: - // - // - CRC32 - // - // - CRC32C - // - // - CRC64NVME - // - // - SHA1 - // - // - SHA256 - // - // For more information, see [Checking object integrity] in the Amazon S3 User Guide. - // - // If the individual checksum value you provide through x-amz-checksum-algorithm - // doesn't match the checksum algorithm you set through - // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest - // error. - // - // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any - // request to upload an object with a retention period configured using Amazon S3 - // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object - ChecksumAlgorithm types.ChecksumAlgorithm - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum - // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4]. - // - // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4 - ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length - ContentLength *int64 - - // The Base64 encoded 128-bit MD5 digest of the message (without the headers) - // according to RFC 1864. This header can be used as a message integrity check to - // verify that the data is the same data that was originally sent. Although it is - // optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, see [REST Authentication]. - // - // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any - // request to upload an object with a retention period configured using Amazon S3 - // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html - // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object - ContentMD5 *string - - // A standard MIME type describing the format of the contents. For more - // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type - ContentType *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The date and time at which the object is no longer cacheable. For more - // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]. - // - // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 - Expires *time.Time - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantFullControl *string - - // Allows grantee to read the object data and its metadata. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantRead *string - - // Allows grantee to read the object ACL. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantReadACP *string - - // Allows grantee to write the ACL for the applicable object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantWriteACP *string - - // Uploads the object only if the ETag (entity tag) value provided during the - // WRITE operation matches the ETag of the object in S3. If the ETag values do not - // match, the operation returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should fetch the - // object's ETag and retry the upload. - // - // Expects the ETag value as a string. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. - // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Uploads the object only if the object key name does not already exist in the - // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should retry the - // upload. - // - // Expects the '*' (asterisk) character. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. - // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Specifies whether a legal hold will be applied to this object. For more - // information about S3 Object Lock, see [Object Lock]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode that you want to apply to this object. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // The date and time when you want this object's Object Lock to expire. Must be - // formatted as a timestamp parameter. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256 ). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // Specifies the Amazon Web Services KMS Encryption Context as an additional - // encryption context to use for object encryption. The value of this header is a - // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption - // context as key-value pairs. This value is stored as object metadata and - // automatically gets passed on to Amazon Web Services KMS for future GetObject - // operations on this object. - // - // General purpose buckets - This value must be explicitly added during CopyObject - // operations if you want an additional encryption context for your object. For - // more information, see [Encryption context]in the Amazon S3 User Guide. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - // - // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context - SSEKMSEncryptionContext *string - - // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object - // encryption. If the KMS key doesn't exist in the same account that's issuing the - // command, you must use the full Key ARN not the Key ID. - // - // General purpose buckets - If you specify x-amz-server-side-encryption with - // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key - // Alias) of the KMS key to use. If you specify - // x-amz-server-side-encryption:aws:kms or - // x-amz-server-side-encryption:aws:kms:dsse , but do not provide - // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web - // Services managed key ( aws/s3 ) to protect the data. - // - // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify - // the x-amz-server-side-encryption header to aws:kms . Then, the - // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's - // default KMS customer managed key ID. If you want to explicitly set the - // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's - // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS - // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 - // ) isn't supported. - // - // Incorrect key specification results in an HTTP 400 Bad Request error. - // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm that was used when you store this object - // in Amazon S3 or Amazon FSx. - // - // - General purpose buckets - You have four mutually exclusive options to - // protect data using server-side encryption in Amazon S3, depending on how you - // choose to manage the encryption keys. Specifically, the encryption key options - // are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or - // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with - // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You - // can optionally tell Amazon S3 to encrypt data at rest by using server-side - // encryption with other key options. For more information, see [Using Server-Side Encryption]in the Amazon S3 - // User Guide. - // - // - Directory buckets - For directory buckets, there are only two supported - // options for server-side encryption: server-side encryption with Amazon S3 - // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys - // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses - // the desired encryption configuration and you don't override the bucket default - // encryption in your CreateSession requests or PUT object requests. Then, new - // objects are automatically encrypted with the desired encryption settings. For - // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about - // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. - // - // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the - // encryption request headers must match the encryption settings that are specified - // in the CreateSession request. You can't override the values of the encryption - // settings ( x-amz-server-side-encryption , - // x-amz-server-side-encryption-aws-kms-key-id , - // x-amz-server-side-encryption-context , and - // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the - // CreateSession request. You don't need to explicitly specify these encryption - // settings values in Zonal endpoint API calls, and Amazon S3 will use the - // encryption settings values from the CreateSession request to protect new - // objects in the directory bucket. - // - // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the - // session token refreshes automatically to avoid service interruptions when a - // session expires. The CLI or the Amazon Web Services SDKs use the bucket's - // default encryption configuration for the CreateSession request. It's not - // supported to override the encryption settings values in the CreateSession - // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption - // request headers must match the default encryption configuration of the directory - // bucket. - // - // - S3 access points for Amazon FSx - When accessing data stored in Amazon FSx - // file systems using S3 access points, the only valid server side encryption - // option is aws:fsx . All Amazon FSx file systems have encryption configured by - // default and are encrypted at rest. Data is automatically encrypted before being - // written to the file system, and automatically decrypted as it is read. These - // processes are handled transparently by Amazon FSx. - // - // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html - // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - ServerSideEncryption types.ServerSideEncryption - - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high - // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. - // - // - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone - // storage class) in Availability Zones and ONEZONE_IA (the S3 One - // Zone-Infrequent Access storage class) in Dedicated Local Zones. - // - // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. (For example, "Key1=Value1") - // - // This functionality is not supported for directory buckets. - Tagging *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. For information about object - // metadata, see [Object Key and Metadata]in the Amazon S3 User Guide. - // - // In the following example, the request header sets the redirect to an object - // (anotherPage.html) in the same bucket: - // - // x-amz-website-redirect-location: /anotherPage.html - // - // In the following example, the request header sets the object redirect to - // another website: - // - // x-amz-website-redirect-location: http://www.example.com/ - // - // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the - // Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html - // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html - // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html - WebsiteRedirectLocation *string - - // Specifies the offset for appending data to existing objects in bytes. The - // offset must be equal to the size of the existing object being appended to. If no - // object exists, setting this header to 0 will create a new object. - // - // This functionality is only supported for objects in the Amazon S3 Express One - // Zone storage class in directory buckets. - WriteOffsetBytes *int64 - - noSmithyDocumentSerde -} - -func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type PutObjectOutput struct { - - // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only - // present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This header is - // present if the object was uploaded with the CRC64NVME checksum algorithm, or if - // it was uploaded without a checksum (and Amazon S3 added the default checksum, - // CRC64NVME , to the uploaded object). For more information about how checksums - // are calculated with multipart uploads, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // This header specifies the checksum type of the object, which determines how - // part-level checksums are combined to create an object-level checksum for - // multipart objects. For PutObject uploads, the checksum type is always - // FULL_OBJECT . You can use this header as a data integrity check to verify that - // the checksum type that is received is the same checksum that was specified. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Entity tag for the uploaded object. - // - // General purpose buckets - To ensure that data is not corrupted traversing the - // network, for objects where the ETag is the MD5 digest of the object, you can - // calculate the MD5 while putting an object to Amazon S3 and compare the returned - // ETag to the calculated MD5 value. - // - // Directory buckets - The ETag for the object in a directory bucket isn't the MD5 - // digest of the object. - ETag *string - - // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User - // Guide, the response includes this header. It includes the expiry-date and - // rule-id key-value pairs that provide information about object expiration. The - // value of the rule-id is URL-encoded. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - // - // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html - Expiration *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded string of a - // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. - // This value is stored as object metadata and automatically gets passed on to - // Amazon Web Services KMS for future GetObject operations on this object. - SSEKMSEncryptionContext *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // The size of the object in bytes. This value is only be present if you append - // to an object. - // - // This functionality is only supported for objects in the Amazon S3 Express One - // Zone storage class in directory buckets. - Size *int64 - - // Version ID of the object. - // - // If you enable versioning for a bucket, Amazon S3 automatically generates a - // unique version ID for the object being stored. Amazon S3 returns this ID in the - // response. When you enable versioning for a bucket, if Amazon S3 receives - // multiple write requests for the same object simultaneously, it stores all of the - // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User - // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning]. - // - // This functionality is not supported for directory buckets. - // - // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html - // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = add100Continue(stack, options); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObject", - } -} - -// getPutObjectRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. -func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: true, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectBucketMember returns a pointer to string denoting a provided bucket -// member valueand a boolean indicating if the input has a modeled bucket name, -func getPutObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignPutObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &PutObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, - c.client.addOperationPutObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - func(stack *middleware.Stack, options Options) error { - return awshttp.RemoveContentTypeHeader(stack) - }, - addPutObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addPutObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go deleted file mode 100644 index 110cf08cae98..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go +++ /dev/null @@ -1,541 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Uses the acl subresource to set the access control list (ACL) permissions for a -// new or existing object in an S3 bucket. You must have the WRITE_ACP permission -// to set the ACL of an object. For more information, see [What permissions can I grant?]in the Amazon S3 User -// Guide. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// Depending on your application needs, you can choose to set the ACL on an object -// using either the request body or the headers. For example, if you have an -// existing application that updates a bucket ACL using the request body, you can -// continue to use that approach. For more information, see [Access Control List (ACL) Overview]in the Amazon S3 User -// Guide. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// ACLs are disabled and no longer affect permissions. You must use policies to -// grant access to your bucket and the objects in it. Requests to set ACLs or -// update ACLs fail and return the AccessControlListNotSupported error code. -// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the -// Amazon S3 User Guide. -// -// Permissions You can set access permissions using one of the following methods: -// -// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a -// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined -// set of grantees and permissions. Specify the canned ACL name as the value of -// x-amz-ac l. If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see [Canned ACL]. -// -// - Specify access permissions explicitly with the x-amz-grant-read , -// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control -// headers. When using these headers, you specify explicit access permissions and -// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the -// permission. If you use these ACL-specific headers, you cannot use x-amz-acl -// header to set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. -// -// You specify each grantee as a type=value pair, where the type is one of the -// -// following: -// -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// -// - uri – if you are granting permissions to a predefined group -// -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// For example, the following x-amz-grant-read header grants list objects -// -// permission to the two Amazon Web Services accounts identified by their email -// addresses. -// -// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" -// -// You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. -// -// Grantee Values You can specify the person (grantee) to whom you're assigning -// access rights (using request elements) in the following ways. For examples of -// how to specify these grantee values in JSON format, see the Amazon Web Services -// CLI example in [Enabling Amazon S3 server access logging]in the Amazon S3 User Guide. -// -// - By the person's ID: -// -// <>ID<><>GranteesEmail<> -// -// DisplayName is optional and ignored in the request. -// -// - By URI: -// -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// - By Email address: -// -// <>Grantees@email.com<>lt;/Grantee> -// -// The grantee is resolved to the CanonicalUser and, in a response to a GET Object -// -// acl request, appears as the CanonicalUser. -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// Versioning The ACL of an object is set at the object version level. By default, -// PUT sets the ACL of the current version of an object. To set the ACL of a -// different version, use the versionId subresource. -// -// The following operations are related to PutObjectAcl : -// -// [CopyObject] -// -// [GetObject] -// -// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region -// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html -// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Enabling Amazon S3 server access logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html -func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { - if params == nil { - params = &PutObjectAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectAcl", params, optFns, c.addOperationPutObjectAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectAclInput struct { - - // The bucket name that contains the object to which you want to attach the ACL. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key for which the PUT action was initiated. - // - // This member is required. - Key *string - - // The canned ACL to apply to the object. For more information, see [Canned ACL]. - // - // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL - ACL types.ObjectCannedACL - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *types.AccessControlPolicy - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to [RFC 1864.>] - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864.>]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - // - // This functionality is not supported for Amazon S3 on Outposts. - GrantFullControl *string - - // Allows grantee to list the objects in the bucket. - // - // This functionality is not supported for Amazon S3 on Outposts. - GrantRead *string - - // Allows grantee to read the bucket ACL. - // - // This functionality is not supported for Amazon S3 on Outposts. - GrantReadACP *string - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions and - // overwrites of those objects. - GrantWrite *string - - // Allows grantee to write the ACL for the applicable bucket. - // - // This functionality is not supported for Amazon S3 on Outposts. - GrantWriteACP *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Version ID used to reference a specific version of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type PutObjectAclOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectAcl", - } -} - -// getPutObjectAclRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. -func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectAclInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectAclRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutObjectAclBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go deleted file mode 100644 index 1c937604c790..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ /dev/null @@ -1,354 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Applies a legal hold configuration to the specified object. For more -// information, see [Locking Objects]. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { - if params == nil { - params = &PutObjectLegalHoldInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectLegalHold", params, optFns, c.addOperationPutObjectLegalHoldMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectLegalHoldOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectLegalHoldInput struct { - - // The bucket name containing the object that you want to place a legal hold on. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object that you want to place a legal hold on. - // - // This member is required. - Key *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Container element for the legal hold configuration you want to apply to the - // specified object. - LegalHold *types.ObjectLockLegalHold - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The version ID of the object that you want to place a legal hold on. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectLegalHoldOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLegalHold"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLegalHold(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectLegalHoldUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectLegalHoldInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectLegalHold", - } -} - -// getPutObjectLegalHoldRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectLegalHoldInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectLegalHoldBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutObjectLegalHoldBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectLegalHoldInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectLegalHoldBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go deleted file mode 100644 index 14837e1013b4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Places an Object Lock configuration on the specified bucket. The rule specified -// in the Object Lock configuration will be applied by default to every new object -// placed in the specified bucket. For more information, see [Locking Objects]. -// -// - The DefaultRetention settings require both a mode and a period. -// -// - The DefaultRetention period can be either Days or Years but you must select -// one. You cannot specify Days and Years at the same time. -// -// - You can enable Object Lock for new or existing buckets. For more -// information, see [Configuring Object Lock]. -// -// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { - if params == nil { - params = &PutObjectLockConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectLockConfiguration", params, optFns, c.addOperationPutObjectLockConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectLockConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectLockConfigurationInput struct { - - // The bucket whose Object Lock configuration you want to create or replace. - // - // This member is required. - Bucket *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The Object Lock configuration that you want to apply to the specified bucket. - ObjectLockConfiguration *types.ObjectLockConfiguration - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // A token to allow Object Lock to be enabled for an existing bucket. - Token *string - - noSmithyDocumentSerde -} - -func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectLockConfigurationOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLockConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLockConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectLockConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectLockConfiguration", - } -} - -// getPutObjectLockConfigurationRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectLockConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectLockConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectLockConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go deleted file mode 100644 index f0ef2185c137..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Places an Object Retention configuration on an object. For more information, -// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order -// to place an Object Retention configuration on objects. Bypassing a Governance -// Retention configuration requires the s3:BypassGovernanceRetention permission. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { - if params == nil { - params = &PutObjectRetentionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectRetention", params, optFns, c.addOperationPutObjectRetentionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectRetentionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectRetentionInput struct { - - // The bucket name that contains the object you want to apply this Object - // Retention configuration to. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object that you want to apply this Object Retention - // configuration to. - // - // This member is required. - Key *string - - // Indicates whether this action should bypass Governance-mode restrictions. - BypassGovernanceRetention *bool - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The container element for the Object Retention configuration. - Retention *types.ObjectLockRetention - - // The version ID for the object that you want to apply this Object Retention - // configuration to. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectRetentionOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectRetention{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectRetention"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectRetention(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectRetentionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectRetentionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectRetention", - } -} - -// getPutObjectRetentionRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectRetentionInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectRetentionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutObjectRetentionBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectRetentionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectRetentionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go deleted file mode 100644 index 7c59221c013b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ /dev/null @@ -1,393 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the supplied tag-set to an object that already exists in a bucket. A tag -// is a key-value pair. For more information, see [Object Tagging]. -// -// You can associate tags with an object by sending a PUT request against the -// tagging subresource that is associated with the object. You can retrieve tags by -// sending a GET request. For more information, see [GetObjectTagging]. -// -// For tagging-related restrictions related to characters and encodings, see [Tag Restrictions]. -// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. -// -// To use this operation, you must have permission to perform the -// s3:PutObjectTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. -// -// To put tags of any other version, use the versionId query parameter. You also -// need permission for the s3:PutObjectVersionTagging action. -// -// PutObjectTagging has the following special errors. For more Amazon S3 errors -// see, [Error Responses]. -// -// - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see [Object Tagging]. -// -// - MalformedXML - The XML provided does not match the schema. -// -// - OperationAborted - A conflicting conditional action is currently in progress -// against this resource. Please try again. -// -// - InternalError - The service was unable to apply the provided tag to the -// object. -// -// The following operations are related to PutObjectTagging : -// -// [GetObjectTagging] -// -// [DeleteObjectTagging] -// -// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html -// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html -// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html -// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html -func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { - if params == nil { - params = &PutObjectTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectTagging", params, optFns, c.addOperationPutObjectTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectTaggingInput struct { - - // The bucket name containing the object. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Name of the object key. - // - // This member is required. - Key *string - - // Container for the TagSet and Tag elements - // - // This member is required. - Tagging *types.Tagging - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The versionId of the object that the tag-set will be added to. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectTaggingOutput struct { - - // The versionId of the object the tag-set was added to. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectTagging", - } -} - -// getPutObjectTaggingRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectTaggingInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutObjectTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go deleted file mode 100644 index 8f79019b10fd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. -// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an -// object, it checks the PublicAccessBlock configuration for both the bucket (or -// the bucket that contains the object) and the bucket owner's account. If the -// PublicAccessBlock configurations are different between the bucket and the -// account, Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see [The Meaning of "Public"]. -// -// The following operations are related to PutPublicAccessBlock : -// -// [GetPublicAccessBlock] -// -// [DeletePublicAccessBlock] -// -// [GetBucketPolicyStatus] -// -// [Using Amazon S3 Block Public Access] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { - if params == nil { - params = &PutPublicAccessBlockInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutPublicAccessBlock", params, optFns, c.addOperationPutPublicAccessBlockMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutPublicAccessBlockOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutPublicAccessBlockInput struct { - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want - // to set. - // - // This member is required. - Bucket *string - - // The PublicAccessBlock configuration that you want to apply to this Amazon S3 - // bucket. You can enable the configuration options in any combination. For more - // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in - // the Amazon S3 User Guide. - // - // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status - // - // This member is required. - PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash of the PutPublicAccessBlock request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutPublicAccessBlockOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutPublicAccessBlock"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutPublicAccessBlock(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutPublicAccessBlockUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutPublicAccessBlockInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutPublicAccessBlock", - } -} - -// getPutPublicAccessBlockRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutPublicAccessBlockInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutPublicAccessBlockBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutPublicAccessBlockBucketMember(input interface{}) (*string, bool) { - in := input.(*PutPublicAccessBlockInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutPublicAccessBlockBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RenameObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RenameObject.go deleted file mode 100644 index ab057ffa6cc0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RenameObject.go +++ /dev/null @@ -1,397 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Renames an existing object in a directory bucket that uses the S3 Express One -// Zone storage class. You can use RenameObject by specifying an existing object’s -// name as the source and the new name of the object as the destination within the -// same directory bucket. -// -// RenameObject is only supported for objects stored in the S3 Express One Zone -// storage class. -// -// To prevent overwriting an object, you can use the If-None-Match conditional -// header. -// -// - If-None-Match - Renames the object only if an object with the specified -// name does not already exist in the directory bucket. If you don't want to -// overwrite an existing object, you can add the If-None-Match conditional header -// with the value ‘*’ in the RenameObject request. Amazon S3 then returns a 412 -// Precondition Failed error if the object with the specified name already -// exists. For more information, see [RFC 7232]. -// -// Permissions To grant access to the RenameObject operation on a directory -// bucket, we recommend that you use the CreateSession operation for session-based -// authorization. Specifically, you grant the s3express:CreateSession permission -// to the directory bucket in a bucket policy or an IAM identity-based policy. -// Then, you make the CreateSession API call on the directory bucket to obtain a -// session token. With the session token in your request header, you can make API -// requests to this operation. After the session token expires, you make another -// CreateSession API call to generate a new session token for use. The Amazon Web -// Services CLI and SDKs will create and manage your session including refreshing -// the session token automatically to avoid service interruptions when a session -// expires. In your bucket policy, you can specify the s3express:SessionMode -// condition key to control who can create a ReadWrite or ReadOnly session. A -// ReadWrite session is required for executing all the Zonal endpoint API -// operations, including RenameObject . For more information about authorization, -// see [CreateSession]CreateSession . To learn more about Zonal endpoint API operations, see [Authorizing Zonal endpoint API operations with CreateSession] in -// the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [RFC 7232]: https://datatracker.ietf.org/doc/rfc7232/ -// [Authorizing Zonal endpoint API operations with CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-create-session.html -func (c *Client) RenameObject(ctx context.Context, params *RenameObjectInput, optFns ...func(*Options)) (*RenameObjectOutput, error) { - if params == nil { - params = &RenameObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "RenameObject", params, optFns, c.addOperationRenameObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*RenameObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type RenameObjectInput struct { - - // The bucket name of the directory bucket containing the object. - // - // You must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // Key name of the object to rename. - // - // This member is required. - Key *string - - // Specifies the source for the rename operation. The value must be URL encoded. - // - // This member is required. - RenameSource *string - - // A unique string with a max of 64 ASCII characters in the ASCII range of 33 - - // 126. - // - // RenameObject supports idempotency using a client token. To make an idempotent - // API request using RenameObject , specify a client token in the request. You - // should not reuse the same client token for other API requests. If you retry a - // request that completed successfully using the same client token and the same - // parameters, the retry succeeds without performing any further actions. If you - // retry a successful request using the same client token, but one or more of the - // parameters are different, the retry fails and an IdempotentParameterMismatch - // error is returned. - ClientToken *string - - // Renames the object only if the ETag (entity tag) value provided during the - // operation matches the ETag of the object in S3. The If-Match header field makes - // the request method conditional on ETags. If the ETag values do not match, the - // operation returns a 412 Precondition Failed error. - // - // Expects the ETag value as a string. - DestinationIfMatch *string - - // Renames the object if the destination exists and if it has been modified since - // the specified time. - DestinationIfModifiedSince *time.Time - - // Renames the object only if the destination does not already exist in the - // specified directory bucket. If the object does exist when you send a request - // with If-None-Match:* , the S3 API will return a 412 Precondition Failed error, - // preventing an overwrite. The If-None-Match header prevents overwrites of - // existing data by validating that there's not an object with the same key name - // already in your directory bucket. - // - // Expects the * character (asterisk). - DestinationIfNoneMatch *string - - // Renames the object if it hasn't been modified since the specified time. - DestinationIfUnmodifiedSince *time.Time - - // Renames the object if the source exists and if its entity tag (ETag) matches - // the specified ETag. - SourceIfMatch *string - - // Renames the object if the source exists and if it has been modified since the - // specified time. - SourceIfModifiedSince *time.Time - - // Renames the object if the source exists and if its entity tag (ETag) is - // different than the specified ETag. If an asterisk ( * ) character is provided, - // the operation will fail and return a 412 Precondition Failed error. - SourceIfNoneMatch *string - - // Renames the object if the source exists and hasn't been modified since the - // specified time. - SourceIfUnmodifiedSince *time.Time - - noSmithyDocumentSerde -} - -func (in *RenameObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type RenameObjectOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationRenameObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpRenameObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpRenameObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "RenameObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addIdempotencyToken_opRenameObjectMiddleware(stack, options); err != nil { - return err - } - if err = addOpRenameObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRenameObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRenameObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *RenameObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -type idempotencyToken_initializeOpRenameObject struct { - tokenProvider IdempotencyTokenProvider -} - -func (*idempotencyToken_initializeOpRenameObject) ID() string { - return "OperationIdempotencyTokenAutoFill" -} - -func (m *idempotencyToken_initializeOpRenameObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.tokenProvider == nil { - return next.HandleInitialize(ctx, in) - } - - input, ok := in.Parameters.(*RenameObjectInput) - if !ok { - return out, metadata, fmt.Errorf("expected middleware input to be of type *RenameObjectInput ") - } - - if input.ClientToken == nil { - t, err := m.tokenProvider.GetIdempotencyToken() - if err != nil { - return out, metadata, err - } - input.ClientToken = &t - } - return next.HandleInitialize(ctx, in) -} -func addIdempotencyToken_opRenameObjectMiddleware(stack *middleware.Stack, cfg Options) error { - return stack.Initialize.Add(&idempotencyToken_initializeOpRenameObject{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) -} - -func newServiceMetadataMiddleware_opRenameObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "RenameObject", - } -} - -// getRenameObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getRenameObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*RenameObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addRenameObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getRenameObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go deleted file mode 100644 index 75c6957be0cd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ /dev/null @@ -1,494 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// # Restores an archived copy of an object back into Amazon S3 -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// This action performs the following types of requests: -// -// - restore an archive - Restore an archived object -// -// For more information about the S3 structure in the request body, see the -// following: -// -// [PutObject] -// -// [Managing Access with ACLs] -// - in the Amazon S3 User Guide -// -// [Protecting Data Using Server-Side Encryption] -// - in the Amazon S3 User Guide -// -// Permissions To use this operation, you must have permissions to perform the -// s3:RestoreObject action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] -// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. -// -// Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval -// or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or -// S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For -// objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage -// classes, you must first initiate a restore request, and then wait until a -// temporary copy of the object is available. If you want a permanent copy of the -// object, create a copy of it in the Amazon S3 Standard storage class in your S3 -// bucket. To access an archived object, you must restore the object for the -// duration (number of days) that you specify. For objects in the Archive Access or -// Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a -// restore request, and then wait until the object is moved into the Frequent -// Access tier. -// -// To restore a specific object version, you can provide a version ID. If you -// don't provide a version ID, Amazon S3 restores the current version. -// -// When restoring an archived object, you can specify one of the following data -// access tier options in the Tier element of the request body: -// -// - Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier Flexible Retrieval storage class or S3 -// Intelligent-Tiering Archive tier when occasional urgent requests for restoring -// archives are required. For all but the largest archived objects (250 MB+), data -// accessed using Expedited retrievals is typically made available within 1–5 -// minutes. Provisioned capacity ensures that retrieval capacity for Expedited -// retrievals is available when you need it. Expedited retrievals and provisioned -// capacity are not available for objects stored in the S3 Glacier Deep Archive -// storage class or S3 Intelligent-Tiering Deep Archive tier. -// -// - Standard - Standard retrievals allow you to access any of your archived -// objects within several hours. This is the default option for retrieval requests -// that do not specify the retrieval option. Standard retrievals typically finish -// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage -// class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 -// hours for objects stored in the S3 Glacier Deep Archive storage class or S3 -// Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects -// stored in S3 Intelligent-Tiering. -// -// - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible -// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve -// large amounts, even petabytes, of data at no cost. Bulk retrievals typically -// finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval -// storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also -// the lowest-cost retrieval option when restoring objects from S3 Glacier Deep -// Archive. They typically finish within 48 hours for objects stored in the S3 -// Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. -// -// For more information about archive retrieval options and provisioned capacity -// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide. -// -// You can use Amazon S3 restore speed upgrade to change the restore speed to a -// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon -// S3 User Guide. -// -// To get the status of object restoration, you can send a HEAD request. -// Operations return the x-amz-restore header, which provides information about -// the restoration status, in the response. You can use Amazon S3 event -// notifications to notify you when a restore is initiated or completed. For more -// information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide. -// -// After restoring an archived object, you can update the restoration period by -// reissuing the request with a new period. Amazon S3 updates the restoration -// period relative to the current time and charges only for the request-there are -// no data transfer charges. You cannot update the restoration period when Amazon -// S3 is actively processing your current restore request for the object. -// -// If your bucket has a lifecycle configuration with a rule that includes an -// expiration action, the object expiration overrides the life span that you -// specify in a restore request. For example, if you restore an object copy for 10 -// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the -// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management] -// in Amazon S3 User Guide. -// -// Responses A successful action returns either the 200 OK or 202 Accepted status -// code. -// -// - If the object is not previously restored, then Amazon S3 returns 202 -// Accepted in the response. -// -// - If the object is previously restored, Amazon S3 returns 200 OK in the -// response. -// -// - Special errors: -// -// - Code: RestoreAlreadyInProgress -// -// - Cause: Object restore is already in progress. -// -// - HTTP Status Code: 409 Conflict -// -// - SOAP Fault Code Prefix: Client -// -// - Code: GlacierExpeditedRetrievalNotAvailable -// -// - Cause: expedited retrievals are currently not available. Try again later. -// (Returned if there is insufficient capacity to process the Expedited request. -// This error applies only to Expedited retrievals and not to S3 Standard or Bulk -// retrievals.) -// -// - HTTP Status Code: 503 -// -// - SOAP Fault Code Prefix: N/A -// -// The following operations are related to RestoreObject : -// -// [PutBucketLifecycleConfiguration] -// -// [GetBucketNotificationConfiguration] -// -// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html -// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html -// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html -func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { - if params == nil { - params = &RestoreObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "RestoreObject", params, optFns, c.addOperationRestoreObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*RestoreObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type RestoreObjectInput struct { - - // The bucket name containing the object to restore. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the action was initiated. - // - // This member is required. - Key *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Container for restore job parameters. - RestoreRequest *types.RestoreRequest - - // VersionId used to reference a specific version of the object. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type RestoreObjectOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Indicates the path in the provided S3 output location where Select results will - // be restored to. - RestoreOutputPath *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpRestoreObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addRestoreObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *RestoreObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "RestoreObject", - } -} - -// getRestoreObjectRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*RestoreObjectInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getRestoreObjectRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getRestoreObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getRestoreObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*RestoreObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addRestoreObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getRestoreObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go deleted file mode 100644 index 2051618a3350..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go +++ /dev/null @@ -1,518 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithysync "github.com/aws/smithy-go/sync" - "sync" -) - -// This operation is not supported for directory buckets. -// -// This action filters the contents of an Amazon S3 object based on a simple -// structured query language (SQL) statement. In the request, along with the SQL -// expression, you must also specify a data serialization format (JSON, CSV, or -// Apache Parquet) of the object. Amazon S3 uses this format to parse object data -// into records, and returns only records that match the specified SQL expression. -// You must also specify the data serialization format for the response. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User -// Guide. -// -// Permissions You must have the s3:GetObject permission for this operation. -// Amazon S3 Select does not support anonymous access. For more information about -// permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. -// -// Object Data Formats You can use Amazon S3 Select to query objects that have the -// following format properties: -// -// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. -// -// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. -// -// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. -// GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports -// for CSV and JSON files. Amazon S3 Select supports columnar compression for -// Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object -// compression for Parquet objects. -// -// - Server-side encryption - Amazon S3 Select supports querying objects that -// are protected with server-side encryption. -// -// For objects that are encrypted with customer-provided encryption keys (SSE-C), -// -// you must use HTTPS, and you must use the headers that are documented in the [GetObject]. -// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide. -// -// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon -// -// Web Services KMS keys (SSE-KMS), server-side encryption is handled -// transparently, so you don't need to specify anything. For more information about -// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3 -// User Guide. -// -// Working with the Response Body Given the response size is unknown, Amazon S3 -// Select streams the response as a series of messages and includes a -// Transfer-Encoding header with chunked as its value in the response. For more -// information, see [Appendix: SelectObjectContent Response]. -// -// GetObject Support The SelectObjectContent action does not support the following -// GetObject functionality. For more information, see [GetObject]. -// -// - Range : Although you can specify a scan range for an Amazon S3 Select -// request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of -// bytes of an object to return. -// -// - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the -// ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING -// storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or -// REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or -// DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For -// more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide. -// -// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes] -// -// The following operations are related to SelectObjectContent : -// -// [GetObject] -// -// [GetBucketLifecycleConfiguration] -// -// [PutBucketLifecycleConfiguration] -// -// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html -// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html -// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html -// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange -// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList -// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html -// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html -// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// -// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html -func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { - if params == nil { - params = &SelectObjectContentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "SelectObjectContent", params, optFns, c.addOperationSelectObjectContentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*SelectObjectContentOutput) - out.ResultMetadata = metadata - return out, nil -} - -// Learn Amazon S3 Select is no longer available to new customers. Existing -// customers of Amazon S3 Select can continue to use the feature as usual. [Learn more] -// -// Request to filter the contents of an Amazon S3 object based on a simple -// Structured Query Language (SQL) statement. In the request, along with the SQL -// expression, you must specify a data serialization format (JSON or CSV) of the -// object. Amazon S3 uses this to parse object data into records. It returns only -// records that match the specified SQL expression. You must also specify the data -// serialization format for the response. For more information, see [S3Select API Documentation]. -// -// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ -// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html -type SelectObjectContentInput struct { - - // The S3 bucket. - // - // This member is required. - Bucket *string - - // The expression that is used to query the object. - // - // This member is required. - Expression *string - - // The type of the provided expression (for example, SQL). - // - // This member is required. - ExpressionType types.ExpressionType - - // Describes the format of the data in the object that is being queried. - // - // This member is required. - InputSerialization *types.InputSerialization - - // The object key. - // - // This member is required. - Key *string - - // Describes the format of the data that you want Amazon S3 to return in response. - // - // This member is required. - OutputSerialization *types.OutputSerialization - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Specifies if periodic request progress information should be enabled. - RequestProgress *types.RequestProgress - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerAlgorithm *string - - // The server-side encryption (SSE) customer managed key. This parameter is needed - // only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // The MD5 server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - // Specifies the byte range of the object to get the records from. A record is - // processed when its first byte is contained by the range. This parameter is - // optional, but when specified, it must not be empty. See RFC 2616, Section - // 14.35.1 about how to specify the start and end of the range. - // - // ScanRange may be used in the following ways: - // - // - 50100 - process only the records starting between the bytes 50 and 100 - // (inclusive, counting from zero) - // - // - 50 - process only the records starting after the byte 50 - // - // - 50 - process only the records within the last 50 bytes of the file. - ScanRange *types.ScanRange - - noSmithyDocumentSerde -} - -func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type SelectObjectContentOutput struct { - eventStream *SelectObjectContentEventStream - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -// GetStream returns the type to interact with the event stream. -func (o *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { - return o.eventStream -} - -func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpSelectObjectContent{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpSelectObjectContent{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "SelectObjectContent"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addEventStreamSelectObjectContentMiddleware(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSelectObjectContent(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *SelectObjectContentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opSelectObjectContent(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "SelectObjectContent", - } -} - -// getSelectObjectContentBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getSelectObjectContentBucketMember(input interface{}) (*string, bool) { - in := input.(*SelectObjectContentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addSelectObjectContentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getSelectObjectContentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation. -// -// For testing and mocking the event stream this type should be initialized via -// the NewSelectObjectContentEventStream constructor function. Using the functional options -// to pass in nested mock behavior. -type SelectObjectContentEventStream struct { - // SelectObjectContentEventStreamReader is the EventStream reader for the - // SelectObjectContentEventStream events. This value is automatically set by the - // SDK when the API call is made Use this member when unit testing your code with - // the SDK to mock out the EventStream Reader. - // - // Must not be nil. - Reader SelectObjectContentEventStreamReader - - done chan struct{} - closeOnce sync.Once - err *smithysync.OnceErr -} - -// NewSelectObjectContentEventStream initializes an SelectObjectContentEventStream. -// This function should only be used for testing and mocking the SelectObjectContentEventStream -// stream within your application. -// -// The Reader member must be set before reading events from the stream. -func NewSelectObjectContentEventStream(optFns ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream { - es := &SelectObjectContentEventStream{ - done: make(chan struct{}), - err: smithysync.NewOnceErr(), - } - for _, fn := range optFns { - fn(es) - } - return es -} - -// Events returns a channel to read events from. -func (es *SelectObjectContentEventStream) Events() <-chan types.SelectObjectContentEventStream { - return es.Reader.Events() -} - -// Close closes the stream. This will also cause the stream to be closed. -// Close must be called when done using the stream API. Not calling Close -// may result in resource leaks. -// -// Will close the underlying EventStream writer and reader, and no more events can be -// sent or received. -func (es *SelectObjectContentEventStream) Close() error { - es.closeOnce.Do(es.safeClose) - return es.Err() -} - -func (es *SelectObjectContentEventStream) safeClose() { - close(es.done) - - es.Reader.Close() -} - -// Err returns any error that occurred while reading or writing EventStream Events -// from the service API's response. Returns nil if there were no errors. -func (es *SelectObjectContentEventStream) Err() error { - if err := es.err.Err(); err != nil { - return err - } - - if err := es.Reader.Err(); err != nil { - return err - } - - return nil -} - -func (es *SelectObjectContentEventStream) waitStreamClose() { - type errorSet interface { - ErrorSet() <-chan struct{} - } - - var outputErrCh <-chan struct{} - if v, ok := es.Reader.(errorSet); ok { - outputErrCh = v.ErrorSet() - } - var outputClosedCh <-chan struct{} - if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { - outputClosedCh = v.Closed() - } - - select { - case <-es.done: - case <-outputErrCh: - es.err.SetError(es.Reader.Err()) - es.Close() - - case <-outputClosedCh: - if err := es.Reader.Err(); err != nil { - es.err.SetError(es.Reader.Err()) - } - es.Close() - - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UpdateBucketMetadataInventoryTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UpdateBucketMetadataInventoryTableConfiguration.go deleted file mode 100644 index 6e919c9b9021..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UpdateBucketMetadataInventoryTableConfiguration.go +++ /dev/null @@ -1,336 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Enables or disables a live inventory table for an S3 Metadata configuration on -// a general purpose bucket. For more information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// Permissions To use this operation, you must have the following permissions. For -// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// If you want to encrypt your inventory table with server-side encryption with -// Key Management Service (KMS) keys (SSE-KMS), you need additional permissions in -// your KMS key policy. For more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// - s3:UpdateBucketMetadataInventoryTableConfiguration -// -// - s3tables:CreateTableBucket -// -// - s3tables:CreateNamespace -// -// - s3tables:GetTable -// -// - s3tables:CreateTable -// -// - s3tables:PutTablePolicy -// -// - s3tables:PutTableEncryption -// -// - kms:DescribeKey -// -// The following operations are related to -// UpdateBucketMetadataInventoryTableConfiguration : -// -// [CreateBucketMetadataConfiguration] -// -// [DeleteBucketMetadataConfiguration] -// -// [GetBucketMetadataConfiguration] -// -// [UpdateBucketMetadataJournalTableConfiguration] -// -// [GetBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataConfiguration.html -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [UpdateBucketMetadataJournalTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataJournalTableConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -// [DeleteBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataConfiguration.html -func (c *Client) UpdateBucketMetadataInventoryTableConfiguration(ctx context.Context, params *UpdateBucketMetadataInventoryTableConfigurationInput, optFns ...func(*Options)) (*UpdateBucketMetadataInventoryTableConfigurationOutput, error) { - if params == nil { - params = &UpdateBucketMetadataInventoryTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UpdateBucketMetadataInventoryTableConfiguration", params, optFns, c.addOperationUpdateBucketMetadataInventoryTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UpdateBucketMetadataInventoryTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UpdateBucketMetadataInventoryTableConfigurationInput struct { - - // The general purpose bucket that corresponds to the metadata configuration that - // you want to enable or disable an inventory table for. - // - // This member is required. - Bucket *string - - // The contents of your inventory table configuration. - // - // This member is required. - InventoryTableConfiguration *types.InventoryTableConfigurationUpdates - - // The checksum algorithm to use with your inventory table configuration. - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Content-MD5 header for the inventory table configuration. - ContentMD5 *string - - // The expected owner of the general purpose bucket that corresponds to the - // metadata table configuration that you want to enable or disable an inventory - // table for. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *UpdateBucketMetadataInventoryTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type UpdateBucketMetadataInventoryTableConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUpdateBucketMetadataInventoryTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpUpdateBucketMetadataInventoryTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpUpdateBucketMetadataInventoryTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateBucketMetadataInventoryTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpUpdateBucketMetadataInventoryTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateBucketMetadataInventoryTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addUpdateBucketMetadataInventoryTableConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addUpdateBucketMetadataInventoryTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *UpdateBucketMetadataInventoryTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opUpdateBucketMetadataInventoryTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UpdateBucketMetadataInventoryTableConfiguration", - } -} - -// getUpdateBucketMetadataInventoryTableConfigurationRequestAlgorithmMember gets -// the request checksum algorithm value provided as input. -func getUpdateBucketMetadataInventoryTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*UpdateBucketMetadataInventoryTableConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addUpdateBucketMetadataInventoryTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getUpdateBucketMetadataInventoryTableConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getUpdateBucketMetadataInventoryTableConfigurationBucketMember returns a -// pointer to string denoting a provided bucket member valueand a boolean -// indicating if the input has a modeled bucket name, -func getUpdateBucketMetadataInventoryTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*UpdateBucketMetadataInventoryTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addUpdateBucketMetadataInventoryTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getUpdateBucketMetadataInventoryTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UpdateBucketMetadataJournalTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UpdateBucketMetadataJournalTableConfiguration.go deleted file mode 100644 index 9c16afea3ab6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UpdateBucketMetadataJournalTableConfiguration.go +++ /dev/null @@ -1,318 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Enables or disables journal table record expiration for an S3 Metadata -// configuration on a general purpose bucket. For more information, see [Accelerating data discovery with S3 Metadata]in the -// Amazon S3 User Guide. -// -// Permissions To use this operation, you must have the -// s3:UpdateBucketMetadataJournalTableConfiguration permission. For more -// information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// The following operations are related to -// UpdateBucketMetadataJournalTableConfiguration : -// -// [CreateBucketMetadataConfiguration] -// -// [DeleteBucketMetadataConfiguration] -// -// [GetBucketMetadataConfiguration] -// -// [UpdateBucketMetadataInventoryTableConfiguration] -// -// [GetBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataConfiguration.html -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -// [UpdateBucketMetadataInventoryTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UpdateBucketMetadataInventoryTableConfiguration.html -// [DeleteBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataConfiguration.html -func (c *Client) UpdateBucketMetadataJournalTableConfiguration(ctx context.Context, params *UpdateBucketMetadataJournalTableConfigurationInput, optFns ...func(*Options)) (*UpdateBucketMetadataJournalTableConfigurationOutput, error) { - if params == nil { - params = &UpdateBucketMetadataJournalTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UpdateBucketMetadataJournalTableConfiguration", params, optFns, c.addOperationUpdateBucketMetadataJournalTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UpdateBucketMetadataJournalTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UpdateBucketMetadataJournalTableConfigurationInput struct { - - // The general purpose bucket that corresponds to the metadata configuration that - // you want to enable or disable journal table record expiration for. - // - // This member is required. - Bucket *string - - // The contents of your journal table configuration. - // - // This member is required. - JournalTableConfiguration *types.JournalTableConfigurationUpdates - - // The checksum algorithm to use with your journal table configuration. - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Content-MD5 header for the journal table configuration. - ContentMD5 *string - - // The expected owner of the general purpose bucket that corresponds to the - // metadata table configuration that you want to enable or disable journal table - // record expiration for. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *UpdateBucketMetadataJournalTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type UpdateBucketMetadataJournalTableConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUpdateBucketMetadataJournalTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpUpdateBucketMetadataJournalTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpUpdateBucketMetadataJournalTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateBucketMetadataJournalTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpUpdateBucketMetadataJournalTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateBucketMetadataJournalTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addUpdateBucketMetadataJournalTableConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addUpdateBucketMetadataJournalTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *UpdateBucketMetadataJournalTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opUpdateBucketMetadataJournalTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UpdateBucketMetadataJournalTableConfiguration", - } -} - -// getUpdateBucketMetadataJournalTableConfigurationRequestAlgorithmMember gets the -// request checksum algorithm value provided as input. -func getUpdateBucketMetadataJournalTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*UpdateBucketMetadataJournalTableConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addUpdateBucketMetadataJournalTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getUpdateBucketMetadataJournalTableConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getUpdateBucketMetadataJournalTableConfigurationBucketMember returns a pointer -// to string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getUpdateBucketMetadataJournalTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*UpdateBucketMetadataJournalTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addUpdateBucketMetadataJournalTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getUpdateBucketMetadataJournalTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go deleted file mode 100644 index 17f82aecb185..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ /dev/null @@ -1,726 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -// Uploads a part in a multipart upload. -// -// In this operation, you provide new data as a part of an object in your request. -// However, you have an option to specify your existing Amazon S3 object as a data -// source for the part you are uploading. To upload a part from an existing object, -// you use the [UploadPartCopy]operation. -// -// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In -// response to your initiate request, Amazon S3 returns an upload ID, a unique -// identifier that you must include in your upload part request. -// -// Part numbers can be any number from 1 to 10,000, inclusive. A part number -// uniquely identifies a part and also defines its position within the object being -// created. If you upload a new part using the same part number that was used with -// a previous part, the previously uploaded part is overwritten. -// -// For information about maximum and minimum part sizes and other multipart upload -// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. -// -// After you initiate multipart upload and upload one or more parts, you must -// either complete or abort multipart upload in order to stop getting charged for -// storage of the uploaded parts. Only after you either complete or abort multipart -// upload, Amazon S3 frees up the parts storage and stops charging you for the -// parts storage. -// -// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide . -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - To perform a multipart upload with -// encryption using an Key Management Service key, the requester must have -// permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The -// requester must also have permissions for the kms:GenerateDataKey action for -// the CreateMultipartUpload API. Then, the requester needs permissions for the -// kms:Decrypt action on the UploadPart and UploadPartCopy APIs. -// -// These permissions are required because Amazon S3 must decrypt and read data -// -// from the encrypted file parts before it completes the multipart upload. For more -// information about KMS permissions, see [Protecting data using server-side encryption with KMS]in the Amazon S3 User Guide. For -// information about the permissions required to use the multipart upload API, see [Multipart upload and permissions] -// and [Multipart upload API and permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Data integrity General purpose bucket - To ensure that data is not corrupted -// traversing the network, specify the Content-MD5 header in the upload part -// request. Amazon S3 checks the part data against the provided MD5 value. If they -// do not match, Amazon S3 returns an error. If the upload request is signed with -// Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 -// header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]. -// -// Directory buckets - MD5 is not supported by directory buckets. You can use -// checksum algorithms to check object integrity. -// -// Encryption -// - General purpose bucket - Server-side encryption is for data encryption at -// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. You have mutually exclusive options to -// protect data using server-side encryption in Amazon S3, depending on how you -// choose to manage the encryption keys. Specifically, the encryption key options -// are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and -// Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side -// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally -// tell Amazon S3 to encrypt data at rest using server-side encryption with other -// key options. The option you use depends on whether you want to use KMS keys -// (SSE-KMS) or provide your own encryption key (SSE-C). -// -// Server-side encryption is supported by the S3 Multipart Upload operations. -// -// Unless you are using a customer-provided encryption key (SSE-C), you don't need -// to specify the encryption parameters in each UploadPart request. Instead, you -// only need to specify the server-side encryption parameters in the initial -// Initiate Multipart request. For more information, see [CreateMultipartUpload]. -// -// If you request server-side encryption using a customer-provided encryption key -// -// (SSE-C) in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following request headers. -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information, see [Using Server-Side Encryption]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: server-side encryption with Amazon S3 -// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). -// -// Special errors -// -// - Error Code: NoSuchUpload -// -// - Description: The specified multipart upload does not exist. The upload ID -// might be invalid, or the multipart upload might have been aborted or completed. -// -// - HTTP Status Code: 404 Not Found -// -// - SOAP Fault Code Prefix: Client -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to UploadPart : -// -// [CreateMultipartUpload] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html -// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions -func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { - if params == nil { - params = &UploadPartInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UploadPart", params, optFns, c.addOperationUploadPartMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UploadPartOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UploadPartInput struct { - - // The name of the bucket to which the multipart upload was initiated. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Part number of part being uploaded. This is a positive integer between 1 and - // 10,000. - // - // This member is required. - PartNumber *int32 - - // Upload ID identifying the multipart upload whose part is being uploaded. - // - // This member is required. - UploadId *string - - // Object data. - Body io.Reader - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // This checksum algorithm must be the same for all parts and it match the - // checksum value supplied in the CreateMultipartUpload request. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. - ContentLength *int64 - - // The Base64 encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. - // - // This functionality is not supported for directory buckets. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header . This must be the same - // encryption key specified in the initiate multipart upload request. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type UploadPartOutput struct { - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only - // present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // Entity tag for the uploaded object. - ETag *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPart{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPart"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpUploadPartValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPart(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = add100Continue(stack, options); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addUploadPartUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *UploadPartInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UploadPart", - } -} - -// getUploadPartRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. -func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*UploadPartInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getUploadPartRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: true, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getUploadPartBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getUploadPartBucketMember(input interface{}) (*string, bool) { - in := input.(*UploadPartInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addUploadPartUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getUploadPartBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignUploadPart is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &UploadPartInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns, - c.client.addOperationUploadPartMiddlewares, - presignConverter(options).convertToPresignMiddleware, - func(stack *middleware.Stack, options Options) error { - return awshttp.RemoveContentTypeHeader(stack) - }, - addUploadPartPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addUploadPartPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go deleted file mode 100644 index d2ef00bb3171..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ /dev/null @@ -1,698 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Uploads a part by copying data from an existing object as data source. To -// specify the data source, you add the request header x-amz-copy-source in your -// request. To specify a byte range, you add the request header -// x-amz-copy-source-range in your request. -// -// For information about maximum and minimum part sizes and other multipart upload -// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. -// -// Instead of copying data from an existing object as part data, you might use the [UploadPart] -// action to upload new data as a part of an object in your request. -// -// You must initiate a multipart upload before you can upload any part. In -// response to your initiate request, Amazon S3 returns the upload ID, a unique -// identifier that you must include in your upload part request. -// -// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User -// Guide. For information about copying objects using a single atomic action vs. a -// multipart upload, see [Operations on Objects]in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints -// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. -// -// Authentication and authorization All UploadPartCopy requests must be -// authenticated and signed by using IAM credentials (access key ID and secret -// access key for the IAM identities). All headers with the x-amz- prefix, -// including x-amz-copy-source , must be signed. For more information, see [REST Authentication]. -// -// Directory buckets - You must use IAM credentials to authenticate and authorize -// your access to the UploadPartCopy API operation, instead of using the temporary -// security credentials through the CreateSession API operation. -// -// Amazon Web Services CLI or SDKs handles authentication and authorization on -// your behalf. -// -// Permissions You must have READ access to the source object and WRITE access to -// the destination bucket. -// -// - General purpose bucket permissions - You must have the permissions in a -// policy based on the bucket types of your source bucket and destination bucket in -// an UploadPartCopy operation. -// -// - If the source object is in a general purpose bucket, you must have the -// s3:GetObject permission to read the source object that is being copied. -// -// - If the destination bucket is a general purpose bucket, you must have the -// s3:PutObject permission to write the object copy to the destination bucket. -// -// - To perform a multipart upload with encryption using an Key Management -// Service key, the requester must have permission to the kms:Decrypt and -// kms:GenerateDataKey actions on the key. The requester must also have -// permissions for the kms:GenerateDataKey action for the CreateMultipartUpload -// API. Then, the requester needs permissions for the kms:Decrypt action on the -// UploadPart and UploadPartCopy APIs. These permissions are required because -// Amazon S3 must decrypt and read data from the encrypted file parts before it -// completes the multipart upload. For more information about KMS permissions, see [Protecting data using server-side encryption with KMS] -// in the Amazon S3 User Guide. For information about the permissions required to -// use the multipart upload API, see [Multipart upload and permissions]and [Multipart upload API and permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - You must have permissions in a bucket policy -// or an IAM identity-based policy based on the source and destination bucket types -// in an UploadPartCopy operation. -// -// - If the source object that you want to copy is in a directory bucket, you -// must have the s3express:CreateSession permission in the Action element of a -// policy to read the object. By default, the session is in the ReadWrite mode. -// If you want to restrict the access, you can explicitly set the -// s3express:SessionMode condition key to ReadOnly on the copy source bucket. -// -// - If the copy destination is a directory bucket, you must have the -// s3express:CreateSession permission in the Action element of a policy to write -// the object to the destination. The s3express:SessionMode condition key cannot -// be set to ReadOnly on the copy destination. -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. -// -// Encryption -// -// - General purpose buckets - For information about using server-side -// encryption with customer-provided encryption keys with the UploadPartCopy -// operation, see [CopyObject]and [UploadPart]. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: server-side encryption with Amazon S3 -// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). For more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. -// -// For directory buckets, when you perform a CreateMultipartUpload operation and an -// -// UploadPartCopy operation, the request headers you provide in the -// CreateMultipartUpload request must match the default encryption configuration -// of the destination bucket. -// -// S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from -// -// general purpose buckets to directory buckets, from directory buckets to general -// purpose buckets, or between directory buckets, through [UploadPartCopy]. In this case, Amazon -// S3 makes a call to KMS every time a copy request is made for a KMS-encrypted -// object. -// -// Special errors -// -// - Error Code: NoSuchUpload -// -// - Description: The specified multipart upload does not exist. The upload ID -// might be invalid, or the multipart upload might have been aborted or completed. -// -// - HTTP Status Code: 404 Not Found -// -// - Error Code: InvalidRequest -// -// - Description: The specified copy source is not supported as a byte-range -// copy source. -// -// - HTTP Status Code: 400 Bad Request -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to UploadPartCopy : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html -// -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { - if params == nil { - params = &UploadPartCopyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UploadPartCopy", params, optFns, c.addOperationUploadPartCopyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UploadPartCopyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UploadPartCopyInput struct { - - // The bucket name. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Copying objects across different Amazon Web Services Regions isn't supported - // when the source or destination bucket is in Amazon Web Services Local Zones. The - // source and destination buckets must have the same parent Amazon Web Services - // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code - // InvalidRequest . - // - // Access points - When you use this action with an access point for general - // purpose buckets, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When you use this action with an - // access point for directory buckets, you must provide the access point name in - // place of the bucket name. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points are not supported by directory buckets. - // - // S3 on Outposts - When you use this action with S3 on Outposts, you must direct - // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the - // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When - // you use this action with S3 on Outposts, the destination bucket must be the - // Outposts access point ARN or the access point alias. For more information about - // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Specifies the source object for the copy operation. You specify the value in - // one of two formats, depending on whether you want to access the source object - // through an [access point]: - // - // - For objects not accessed through an access point, specify the name of the - // source bucket and key of the source object, separated by a slash (/). For - // example, to copy the object reports/january.pdf from the bucket - // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must - // be URL-encoded. - // - // - For objects accessed through access points, specify the Amazon Resource - // Name (ARN) of the object as accessed through the access point, in the format - // arn:aws:s3:::accesspoint//object/ . For example, to copy the object - // reports/january.pdf through access point my-access-point owned by account - // 123456789012 in Region us-west-2 , use the URL encoding of - // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf - // . The value must be URL encoded. - // - // - Amazon S3 supports copy operations using Access points only when the source - // and destination buckets are in the same Amazon Web Services Region. - // - // - Access points are not supported by directory buckets. - // - // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the - // ARN of the object as accessed in the format - // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object - // reports/january.pdf through outpost my-outpost owned by account 123456789012 - // in Region us-west-2 , use the URL encoding of - // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf - // . The value must be URL-encoded. - // - // If your bucket has versioning enabled, you could have multiple versions of the - // same object. By default, x-amz-copy-source identifies the current version of - // the source object to copy. To copy a specific version of the source object to - // copy, append ?versionId= to the x-amz-copy-source request header (for example, - // x-amz-copy-source: - // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). - // - // If the current version is a delete marker and you don't specify a versionId in - // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error, - // because the object does not exist. If you specify versionId in the - // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an - // HTTP 400 Bad Request error, because you are not allowed to specify a delete - // marker as a version for the x-amz-copy-source . - // - // Directory buckets - S3 Versioning isn't enabled and supported for directory - // buckets. - // - // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html - // - // This member is required. - CopySource *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Part number of part being copied. This is a positive integer between 1 and - // 10,000. - // - // This member is required. - PartNumber *int32 - - // Upload ID identifying the multipart upload whose part is being copied. - // - // This member is required. - UploadId *string - - // Copies the object if its entity tag (ETag) matches the specified tag. - // - // If both of the x-amz-copy-source-if-match and - // x-amz-copy-source-if-unmodified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-match condition evaluates to true , and; - // - // x-amz-copy-source-if-unmodified-since condition evaluates to false ; - // - // Amazon S3 returns 200 OK and copies the data. - CopySourceIfMatch *string - - // Copies the object if it has been modified since the specified time. - // - // If both of the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-none-match condition evaluates to false , and; - // - // x-amz-copy-source-if-modified-since condition evaluates to true ; - // - // Amazon S3 returns 412 Precondition Failed response code. - CopySourceIfModifiedSince *time.Time - - // Copies the object if its entity tag (ETag) is different than the specified ETag. - // - // If both of the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-none-match condition evaluates to false , and; - // - // x-amz-copy-source-if-modified-since condition evaluates to true ; - // - // Amazon S3 returns 412 Precondition Failed response code. - CopySourceIfNoneMatch *string - - // Copies the object if it hasn't been modified since the specified time. - // - // If both of the x-amz-copy-source-if-match and - // x-amz-copy-source-if-unmodified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-match condition evaluates to true , and; - // - // x-amz-copy-source-if-unmodified-since condition evaluates to false ; - // - // Amazon S3 returns 200 OK and copies the data. - CopySourceIfUnmodifiedSince *time.Time - - // The range of bytes to copy from the source object. The range value must use the - // form bytes=first-last, where the first and last are the zero-based byte offsets - // to copy. For example, bytes=0-9 indicates that you want to copy the first 10 - // bytes of the source. You can copy a range only if the source object is greater - // than 5 MB. - CopySourceRange *string - - // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKeyMD5 *string - - // The account ID of the expected destination bucket owner. If the account ID that - // you provide does not match the actual owner of the destination bucket, the - // request fails with the HTTP status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The account ID of the expected source bucket owner. If the account ID that you - // provide does not match the actual owner of the source bucket, the request fails - // with the HTTP status code 403 Forbidden (access denied). - ExpectedSourceBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.DisableS3ExpressSessionAuth = ptr.Bool(true) -} - -type UploadPartCopyOutput struct { - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Container for all response elements. - CopyPartResult *types.CopyPartResult - - // The version of the source object that was copied, if you have enabled - // versioning on the source bucket. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceVersionId *string - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPartCopy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPartCopy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPartCopy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *UploadPartCopyInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UploadPartCopy", - } -} - -// getUploadPartCopyBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getUploadPartCopyBucketMember(input interface{}) (*string, bool) { - in := input.(*UploadPartCopyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addUploadPartCopyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getUploadPartCopyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go deleted file mode 100644 index fefeb1be614b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ /dev/null @@ -1,576 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strings" - "time" -) - -// This operation is not supported for directory buckets. -// -// Passes transformed objects to a GetObject operation when using Object Lambda -// access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points]in the -// Amazon S3 User Guide. -// -// This operation supports metadata that can be returned by [GetObject], in addition to -// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . The -// GetObject response metadata is supported so that the WriteGetObjectResponse -// caller, typically an Lambda function, can provide the same metadata when it -// internally invokes GetObject . When WriteGetObjectResponse is called by a -// customer-owned Lambda function, the metadata returned to the end user GetObject -// call might differ from what Amazon S3 would normally return. -// -// You can include any number of metadata headers. When including a metadata -// header, it should be prefaced with x-amz-meta . For example, -// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to -// forward GetObject metadata. -// -// Amazon Web Services provides some prebuilt Lambda functions that you can use -// with S3 Object Lambda to detect and redact personally identifiable information -// (PII) and decompress S3 objects. These Lambda functions are available in the -// Amazon Web Services Serverless Application Repository, and can be selected -// through the Amazon Web Services Management Console when you create your Object -// Lambda access point. -// -// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a -// natural language processing (NLP) service using machine learning to find -// insights and relationships in text. It automatically detects personally -// identifiable information (PII) such as names, addresses, dates, credit card -// numbers, and social security numbers from documents in your Amazon S3 bucket. -// -// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a -// natural language processing (NLP) service using machine learning to find -// insights and relationships in text. It automatically redacts personally -// identifiable information (PII) such as names, addresses, dates, credit card -// numbers, and social security numbers from documents in your Amazon S3 bucket. -// -// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is -// equipped to decompress objects stored in S3 in one of six compressed file -// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. -// -// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3 -// User Guide. -// -// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html -// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { - if params == nil { - params = &WriteGetObjectResponseInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "WriteGetObjectResponse", params, optFns, c.addOperationWriteGetObjectResponseMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*WriteGetObjectResponseOutput) - out.ResultMetadata = metadata - return out, nil -} - -type WriteGetObjectResponseInput struct { - - // Route prefix to the HTTP URL generated. - // - // This member is required. - RequestRoute *string - - // A single use encrypted token that maps WriteGetObjectResponse to the end user - // GetObject request. - // - // This member is required. - RequestToken *string - - // Indicates that a range of bytes was specified. - AcceptRanges *string - - // The object data. - Body io.Reader - - // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for - // server-side encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 160-bit SHA1 digest of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 256-bit SHA256 digest of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Specifies presentational information for the object. - ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // The size of the content body in bytes. - ContentLength *int64 - - // The portion of the object returned in the response. - ContentRange *string - - // A standard MIME type describing the format of the object data. - ContentType *string - - // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) - // a delete marker. To learn more about delete markers, see [Working with delete markers]. - // - // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html - DeleteMarker *bool - - // An opaque identifier assigned by a web server to a specific version of a - // resource found at a URL. - ETag *string - - // A string that uniquely identifies an error condition. Returned in the tag of - // the error XML response for a corresponding GetObject call. Cannot be used with - // a successful StatusCode header or when the transformed object is provided in - // the body. All error codes from S3 are sentence-cased. The regular expression - // (regex) value is "^[A-Z][a-zA-Z]+$" . - ErrorCode *string - - // Contains a generic description of the error condition. Returned in the tag of - // the error XML response for a corresponding GetObject call. Cannot be used with - // a successful StatusCode header or when the transformed object is provided in - // body. - ErrorMessage *string - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // that provide the object expiration information. The value of the rule-id is - // URL-encoded. - Expiration *string - - // The date and time at which the object is no longer cacheable. - Expires *time.Time - - // The date and time that the object was last modified. - LastModified *time.Time - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Set to the number of metadata entries not returned in x-amz-meta headers. This - // can happen if you create metadata using an API like SOAP that supports more - // flexible metadata than the REST API. For example, using SOAP, you can create - // metadata whose values are not legal HTTP headers. - MissingMeta *int32 - - // Indicates whether an object stored in Amazon S3 has an active legal hold. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For - // more information about S3 Object Lock, see [Object Lock]. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html - ObjectLockMode types.ObjectLockMode - - // The date and time when Object Lock is configured to expire. - ObjectLockRetainUntilDate *time.Time - - // The count of parts this object has. - PartsCount *int32 - - // Indicates if request involves bucket that is either a source or destination in - // a Replication rule. For more information about S3 Replication, see [Replication]. - // - // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html - ReplicationStatus types.ReplicationStatus - - // If present, indicates that the requester was successfully charged for the - // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user - // guide. - // - // This functionality is not supported for directory buckets. - // - // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html - RequestCharged types.RequestCharged - - // Provides information about object restoration operation and expiration time of - // the restored object copy. - Restore *string - - // Encryption algorithm used if server-side encryption with a customer-provided - // encryption key was specified for object stored in Amazon S3. - SSECustomerAlgorithm *string - - // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to - // encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]. - // - // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web - // Services Key Management Service (Amazon Web Services KMS) symmetric encryption - // customer managed key that was used for stored in Amazon S3 object. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when storing requested object in - // Amazon S3 or Amazon FSx. - // - // When accessing data stored in Amazon FSx file systems using S3 access points, - // the only valid server side encryption option is aws:fsx . - ServerSideEncryption types.ServerSideEncryption - - // The integer status code for an HTTP response of a corresponding GetObject - // request. The following is a list of status codes. - // - // - 200 - OK - // - // - 206 - Partial Content - // - // - 304 - Not Modified - // - // - 400 - Bad Request - // - // - 401 - Unauthorized - // - // - 403 - Forbidden - // - // - 404 - Not Found - // - // - 405 - Method Not Allowed - // - // - 409 - Conflict - // - // - 411 - Length Required - // - // - 412 - Precondition Failed - // - // - 416 - Range Not Satisfiable - // - // - 500 - Internal Server Error - // - // - 503 - Service Unavailable - StatusCode *int32 - - // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. - // - // For more information, see [Storage Classes]. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The number of tags, if any, on the object. - TagCount *int32 - - // An ID used to reference a specific version of the object. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *WriteGetObjectResponseInput) bindEndpointParams(p *EndpointParameters) { - - p.UseObjectLambdaEndpoint = ptr.Bool(true) -} - -type WriteGetObjectResponseOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpWriteGetObjectResponse{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpWriteGetObjectResponse{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "WriteGetObjectResponse"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addUnsignedPayload(stack); err != nil { - return err - } - if err = addContentSHA256Header(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { - return err - } - if err = addOpWriteGetObjectResponseValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opWriteGetObjectResponse(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -type endpointPrefix_opWriteGetObjectResponseMiddleware struct { -} - -func (*endpointPrefix_opWriteGetObjectResponseMiddleware) ID() string { - return "EndpointHostPrefix" -} - -func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - opaqueInput := getOperationInput(ctx) - input, ok := opaqueInput.(*WriteGetObjectResponseInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input type %T", opaqueInput) - } - - var prefix strings.Builder - if input.RequestRoute == nil { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so may not be nil")} - } else if !smithyhttp.ValidHostLabel(*input.RequestRoute) { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so must match \"[a-zA-Z0-9-]{1,63}\", but was \"%s\"", *input.RequestRoute)} - } else { - prefix.WriteString(*input.RequestRoute) - } - prefix.WriteString(".") - req.URL.Host = prefix.String() + req.URL.Host - - return next.HandleFinalize(ctx, in) -} -func addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, "ResolveEndpointV2", middleware.After) -} - -func newServiceMetadataMiddleware_opWriteGetObjectResponse(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "WriteGetObjectResponse", - } -} - -func addWriteGetObjectResponseUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: nopGetBucketAccessor, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: true, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go deleted file mode 100644 index f358f3aa88bd..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go +++ /dev/null @@ -1,373 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "slices" - "strings" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -func bindAuthEndpointParams(ctx context.Context, params *AuthResolverParameters, input interface{}, options Options) { - params.endpointParams = bindEndpointParams(ctx, input, options) -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The endpoint resolver parameters for this operation. This service's default - // resolver delegates to endpoint rules. - endpointParams *EndpointParameters - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthEndpointParams(ctx, params, input, options) - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. -type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "WriteGetObjectResponse": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "s3") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - smithyhttp.SetIsUnsignedPayload(&props, true) - return props - }(), - }, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "s3") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - - { - SchemeID: smithyauth.SchemeIDSigV4A, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4ASigningName(&props, "s3") - smithyhttp.SetSigV4ASigningRegions(&props, []string{params.Region}) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - sorted := sortAuthOptions(options, m.options.AuthSchemePreference) - for _, option := range sorted { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option { - byPriority := make([]*smithyauth.Option, 0, len(options)) - for _, prefName := range preferred { - for _, option := range options { - optName := option.SchemeID - if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { - optName = parts[1] - } - if prefName == optName { - byPriority = append(byPriority, option) - } - } - } - for _, option := range options { - if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool { - return o.SchemeID == option.SchemeID - }) { - byPriority = append(byPriority, option) - } - } - return byPriority -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go deleted file mode 100644 index 860af056aa2b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go +++ /dev/null @@ -1,47 +0,0 @@ -package s3 - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" -) - -// putBucketContextMiddleware stores the input bucket name within the request context (if -// present) which is required for a variety of custom S3 behaviors -type putBucketContextMiddleware struct{} - -func (*putBucketContextMiddleware) ID() string { - return "putBucketContext" -} - -func (m *putBucketContextMiddleware) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if bucket, ok := m.bucketFromInput(in.Parameters); ok { - ctx = customizations.SetBucket(ctx, bucket) - } - return next.HandleSerialize(ctx, in) -} - -func (m *putBucketContextMiddleware) bucketFromInput(params interface{}) (string, bool) { - v, ok := params.(bucketer) - if !ok { - return "", false - } - - return v.bucket() -} - -func addPutBucketContextMiddleware(stack *middleware.Stack) error { - // This is essentially a post-Initialize task - only run it once the input - // has received all modifications from that phase. Therefore we add it as - // an early Serialize step. - // - // FUTURE: it would be nice to have explicit phases that only we as SDK - // authors can hook into (such as between phases like this really should - // be) - return stack.Serialize.Add(&putBucketContextMiddleware{}, middleware.Before) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go deleted file mode 100644 index 4e7f7e24e1c1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go +++ /dev/null @@ -1,15 +0,0 @@ -package s3 - -// implemented by all S3 input structures -type bucketer interface { - bucket() (string, bool) -} - -func bucketFromInput(params interface{}) (string, bool) { - v, ok := params.(bucketer) - if !ok { - return "", false - } - - return v.bucket() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go deleted file mode 100644 index 5803b9e45aeb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go/middleware" -) - -// backfills checksum algorithm onto the context for CreateMultipart upload so -// transfer manager can set a checksum header on the request accordingly for -// s3express requests -type setCreateMPUChecksumAlgorithm struct{} - -func (*setCreateMPUChecksumAlgorithm) ID() string { - return "setCreateMPUChecksumAlgorithm" -} - -func (*setCreateMPUChecksumAlgorithm) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unexpected input type %T", in.Parameters) - } - - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(input.ChecksumAlgorithm)) - return next.HandleSerialize(ctx, in) -} - -func addSetCreateMPUChecksumAlgorithm(s *middleware.Stack) error { - return s.Serialize.Add(&setCreateMPUChecksumAlgorithm{}, middleware.Before) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go deleted file mode 100644 index 26f1235b1625..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go +++ /dev/null @@ -1,25226 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" - awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "io/ioutil" - "strconv" - "strings" - "time" -) - -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - -type awsRestxml_deserializeOpAbortMultipartUpload struct { -} - -func (*awsRestxml_deserializeOpAbortMultipartUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorAbortMultipartUpload(response, &metadata) - } - output := &AbortMultipartUploadOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorAbortMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchUpload", errorCode): - return awsRestxml_deserializeErrorNoSuchUpload(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(v *AbortMultipartUploadOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpCompleteMultipartUpload struct { -} - -func (*awsRestxml_deserializeOpCompleteMultipartUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCompleteMultipartUpload(response, &metadata) - } - output := &CompleteMultipartUploadOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCompleteMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *CompleteMultipartUploadOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CompleteMultipartUploadOutput - if *v == nil { - sv = &CompleteMultipartUploadOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("Location", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Location = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpCopyObject struct { -} - -func (*awsRestxml_deserializeOpCopyObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCopyObject(response, &metadata) - } - output := &CopyObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentCopyObjectResult(&output.CopyObjectResult, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCopyObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ObjectNotInActiveTierError", errorCode): - return awsRestxml_deserializeErrorObjectNotInActiveTierError(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.CopySourceVersionId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCopyObjectOutput(v **CopyObjectOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CopyObjectOutput - if *v == nil { - sv = &CopyObjectOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CopyObjectResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCopyObjectResult(&sv.CopyObjectResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpCreateBucket struct { -} - -func (*awsRestxml_deserializeOpCreateBucket) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateBucket(response, &metadata) - } - output := &CreateBucketOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("BucketAlreadyExists", errorCode): - return awsRestxml_deserializeErrorBucketAlreadyExists(response, errorBody) - - case strings.EqualFold("BucketAlreadyOwnedByYou", errorCode): - return awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-bucket-arn"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.BucketArn = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Location"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Location = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpCreateBucketMetadataConfiguration struct { -} - -func (*awsRestxml_deserializeOpCreateBucketMetadataConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateBucketMetadataConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateBucketMetadataConfiguration(response, &metadata) - } - output := &CreateBucketMetadataConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateBucketMetadataConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response, &metadata) - } - output := &CreateBucketMetadataTableConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpCreateMultipartUpload struct { -} - -func (*awsRestxml_deserializeOpCreateMultipartUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateMultipartUpload(response, &metadata) - } - output := &CreateMultipartUploadOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMultipartUploadOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) - if err != nil { - return err - } - v.AbortDate = ptr.Time(t) - } - - if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.AbortRuleId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumType = types.ChecksumType(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CreateMultipartUploadOutput - if *v == nil { - sv = &CreateMultipartUploadOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("UploadId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UploadId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpCreateSession struct { -} - -func (*awsRestxml_deserializeOpCreateSession) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateSession(response, &metadata) - } - output := &CreateSessionOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentCreateSessionOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchBucket", errorCode): - return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(v *CreateSessionOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CreateSessionOutput - if *v == nil { - sv = &CreateSessionOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSessionCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpDeleteBucket struct { -} - -func (*awsRestxml_deserializeOpDeleteBucket) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucket(response, &metadata) - } - output := &DeleteBucketOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response, &metadata) - } - output := &DeleteBucketAnalyticsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketCors struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketCors) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketCors(response, &metadata) - } - output := &DeleteBucketCorsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketEncryption struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketEncryption) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketEncryption(response, &metadata) - } - output := &DeleteBucketEncryptionOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response, &metadata) - } - output := &DeleteBucketIntelligentTieringConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketInventoryConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response, &metadata) - } - output := &DeleteBucketInventoryConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketLifecycle struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketLifecycle) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response, &metadata) - } - output := &DeleteBucketLifecycleOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketMetadataConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketMetadataConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketMetadataConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetadataConfiguration(response, &metadata) - } - output := &DeleteBucketMetadataConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketMetadataConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response, &metadata) - } - output := &DeleteBucketMetadataTableConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response, &metadata) - } - output := &DeleteBucketMetricsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketOwnershipControls struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketOwnershipControls) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response, &metadata) - } - output := &DeleteBucketOwnershipControlsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketPolicy struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketPolicy(response, &metadata) - } - output := &DeleteBucketPolicyOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketReplication struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketReplication) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketReplication(response, &metadata) - } - output := &DeleteBucketReplicationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketTagging struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketTagging(response, &metadata) - } - output := &DeleteBucketTaggingOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketWebsite struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketWebsite) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketWebsite(response, &metadata) - } - output := &DeleteBucketWebsiteOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteObject struct { -} - -func (*awsRestxml_deserializeOpDeleteObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteObject(response, &metadata) - } - output := &DeleteObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.DeleteMarker = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpDeleteObjects struct { -} - -func (*awsRestxml_deserializeOpDeleteObjects) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteObjects(response, &metadata) - } - output := &DeleteObjectsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentDeleteObjectsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(v *DeleteObjectsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentDeleteObjectsOutput(v **DeleteObjectsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DeleteObjectsOutput - if *v == nil { - sv = &DeleteObjectsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Deleted", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(&sv.Deleted, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Error", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentErrorsUnwrapped(&sv.Errors, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpDeleteObjectTagging struct { -} - -func (*awsRestxml_deserializeOpDeleteObjectTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteObjectTagging(response, &metadata) - } - output := &DeleteObjectTaggingOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(v *DeleteObjectTaggingOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpDeletePublicAccessBlock struct { -} - -func (*awsRestxml_deserializeOpDeletePublicAccessBlock) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response, &metadata) - } - output := &DeletePublicAccessBlockOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpGetBucketAccelerateConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketAccelerateConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response, &metadata) - } - output := &GetBucketAccelerateConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(v *GetBucketAccelerateConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v **GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketAccelerateConfigurationOutput - if *v == nil { - sv = &GetBucketAccelerateConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.BucketAccelerateStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketAcl struct { -} - -func (*awsRestxml_deserializeOpGetBucketAcl) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketAcl(response, &metadata) - } - output := &GetBucketAclOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketAclOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketAclOutput(v **GetBucketAclOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketAclOutput - if *v == nil { - sv = &GetBucketAclOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessControlList", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response, &metadata) - } - output := &GetBucketAnalyticsConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentAnalyticsConfiguration(&output.AnalyticsConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketAnalyticsConfigurationOutput(v **GetBucketAnalyticsConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketAnalyticsConfigurationOutput - if *v == nil { - sv = &GetBucketAnalyticsConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&sv.AnalyticsConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketCors struct { -} - -func (*awsRestxml_deserializeOpGetBucketCors) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketCors(response, &metadata) - } - output := &GetBucketCorsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketCorsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketCorsOutput(v **GetBucketCorsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketCorsOutput - if *v == nil { - sv = &GetBucketCorsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CORSRule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCORSRulesUnwrapped(&sv.CORSRules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketEncryption struct { -} - -func (*awsRestxml_deserializeOpGetBucketEncryption) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketEncryption(response, &metadata) - } - output := &GetBucketEncryptionOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&output.ServerSideEncryptionConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketEncryptionOutput(v **GetBucketEncryptionOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketEncryptionOutput - if *v == nil { - sv = &GetBucketEncryptionOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ServerSideEncryptionConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&sv.ServerSideEncryptionConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response, &metadata) - } - output := &GetBucketIntelligentTieringConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&output.IntelligentTieringConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput(v **GetBucketIntelligentTieringConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketIntelligentTieringConfigurationOutput - if *v == nil { - sv = &GetBucketIntelligentTieringConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&sv.IntelligentTieringConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketInventoryConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketInventoryConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response, &metadata) - } - output := &GetBucketInventoryConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentInventoryConfiguration(&output.InventoryConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput(v **GetBucketInventoryConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketInventoryConfigurationOutput - if *v == nil { - sv = &GetBucketInventoryConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("InventoryConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryConfiguration(&sv.InventoryConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketLifecycleConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketLifecycleConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response, &metadata) - } - output := &GetBucketLifecycleConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(v *GetBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketLifecycleConfigurationOutput - if *v == nil { - sv = &GetBucketLifecycleConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketLocation struct { -} - -func (*awsRestxml_deserializeOpGetBucketLocation) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) - } - output := &GetBucketLocationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketLocation(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketLocationOutput(v **GetBucketLocationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketLocationOutput - if *v == nil { - sv = &GetBucketLocationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("LocationConstraint", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.LocationConstraint = types.BucketLocationConstraint(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketLogging struct { -} - -func (*awsRestxml_deserializeOpGetBucketLogging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLogging(response, &metadata) - } - output := &GetBucketLoggingOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLoggingOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketLoggingOutput - if *v == nil { - sv = &GetBucketLoggingOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("LoggingEnabled", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLoggingEnabled(&sv.LoggingEnabled, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketMetadataConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketMetadataConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketMetadataConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetadataConfiguration(response, &metadata) - } - output := &GetBucketMetadataConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentGetBucketMetadataConfigurationResult(&output.GetBucketMetadataConfigurationResult, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketMetadataConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketMetadataConfigurationOutput(v **GetBucketMetadataConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketMetadataConfigurationOutput - if *v == nil { - sv = &GetBucketMetadataConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("GetBucketMetadataConfigurationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGetBucketMetadataConfigurationResult(&sv.GetBucketMetadataConfigurationResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response, &metadata) - } - output := &GetBucketMetadataTableConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&output.GetBucketMetadataTableConfigurationResult, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketMetadataTableConfigurationOutput(v **GetBucketMetadataTableConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketMetadataTableConfigurationOutput - if *v == nil { - sv = &GetBucketMetadataTableConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("GetBucketMetadataTableConfigurationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&sv.GetBucketMetadataTableConfigurationResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) - } - output := &GetBucketMetricsConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketMetricsConfigurationOutput - if *v == nil { - sv = &GetBucketMetricsConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("MetricsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) - } - output := &GetBucketNotificationConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketNotificationConfigurationOutput - if *v == nil { - sv = &GetBucketNotificationConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("QueueConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(&sv.QueueConfigurations, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("TopicConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(&sv.TopicConfigurations, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketOwnershipControls struct { -} - -func (*awsRestxml_deserializeOpGetBucketOwnershipControls) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response, &metadata) - } - output := &GetBucketOwnershipControlsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentOwnershipControls(&output.OwnershipControls, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput(v **GetBucketOwnershipControlsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketOwnershipControlsOutput - if *v == nil { - sv = &GetBucketOwnershipControlsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("OwnershipControls", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwnershipControls(&sv.OwnershipControls, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketPolicy struct { -} - -func (*awsRestxml_deserializeOpGetBucketPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicy(response, &metadata) - } - output := &GetBucketPolicyOutput{} - out.Result = output - - err = awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(output, response.Body, response.ContentLength) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(v *GetBucketPolicyOutput, body io.ReadCloser, contentLength int64) error { - if v == nil { - return fmt.Errorf("unsupported deserialization of nil %T", v) - } - var buf bytes.Buffer - if contentLength > 0 { - buf.Grow(int(contentLength)) - } else { - buf.Grow(512) - } - - _, err := buf.ReadFrom(body) - if err != nil { - return err - } - if buf.Len() > 0 { - v.Policy = ptr.String(buf.String()) - } - return nil -} - -type awsRestxml_deserializeOpGetBucketPolicyStatus struct { -} - -func (*awsRestxml_deserializeOpGetBucketPolicyStatus) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response, &metadata) - } - output := &GetBucketPolicyStatusOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentPolicyStatus(&output.PolicyStatus, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketPolicyStatusOutput(v **GetBucketPolicyStatusOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketPolicyStatusOutput - if *v == nil { - sv = &GetBucketPolicyStatusOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PolicyStatus", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPolicyStatus(&sv.PolicyStatus, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketReplication struct { -} - -func (*awsRestxml_deserializeOpGetBucketReplication) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketReplication(response, &metadata) - } - output := &GetBucketReplicationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentReplicationConfiguration(&output.ReplicationConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketReplicationOutput(v **GetBucketReplicationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketReplicationOutput - if *v == nil { - sv = &GetBucketReplicationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ReplicationConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketRequestPayment struct { -} - -func (*awsRestxml_deserializeOpGetBucketRequestPayment) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketRequestPayment(response, &metadata) - } - output := &GetBucketRequestPaymentOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(v **GetBucketRequestPaymentOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketRequestPaymentOutput - if *v == nil { - sv = &GetBucketRequestPaymentOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Payer", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Payer = types.Payer(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketTagging struct { -} - -func (*awsRestxml_deserializeOpGetBucketTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketTagging(response, &metadata) - } - output := &GetBucketTaggingOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(v **GetBucketTaggingOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketTaggingOutput - if *v == nil { - sv = &GetBucketTaggingOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("TagSet", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketVersioning struct { -} - -func (*awsRestxml_deserializeOpGetBucketVersioning) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketVersioning(response, &metadata) - } - output := &GetBucketVersioningOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(v **GetBucketVersioningOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketVersioningOutput - if *v == nil { - sv = &GetBucketVersioningOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("MfaDelete", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.MFADelete = types.MFADeleteStatus(xtv) - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.BucketVersioningStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketWebsite struct { -} - -func (*awsRestxml_deserializeOpGetBucketWebsite) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketWebsite(response, &metadata) - } - output := &GetBucketWebsiteOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(v **GetBucketWebsiteOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketWebsiteOutput - if *v == nil { - sv = &GetBucketWebsiteOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ErrorDocument", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentErrorDocument(&sv.ErrorDocument, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("IndexDocument", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentIndexDocument(&sv.IndexDocument, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("RedirectAllRequestsTo", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRedirectAllRequestsTo(&sv.RedirectAllRequestsTo, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("RoutingRules", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRoutingRules(&sv.RoutingRules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetObject struct { -} - -func (*awsRestxml_deserializeOpGetObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObject(response, &metadata) - } - output := &GetObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - err = awsRestxml_deserializeOpDocumentGetObjectOutput(output, response.Body) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("InvalidObjectState", errorCode): - return awsRestxml_deserializeErrorInvalidObjectState(response, errorBody) - - case strings.EqualFold("NoSuchKey", errorCode): - return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.AcceptRanges = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.CacheControl = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32C = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC64NVME = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA1 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA256 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumType = types.ChecksumType(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentDisposition = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentEncoding = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentLanguage = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 64) - if err != nil { - return err - } - v.ContentLength = ptr.Int64(vv) - } - - if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentRange = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentType = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.DeleteMarker = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ETag = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - deserOverride, err := deserializeS3Expires(headerValues[0]) - if err != nil { - return err - } - v.Expires = deserOverride - - } - - if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ExpiresString = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) - if err != nil { - return err - } - v.LastModified = ptr.Time(t) - } - - for headerKey, headerValues := range response.Header { - if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { - if v.Metadata == nil { - v.Metadata = map[string]string{} - } - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] - } - } - - if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 32) - if err != nil { - return err - } - v.MissingMeta = ptr.Int32(int32(vv)) - } - - if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseDateTime(headerValues[0]) - if err != nil { - return err - } - v.ObjectLockRetainUntilDate = ptr.Time(t) - } - - if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 32) - if err != nil { - return err - } - v.PartsCount = ptr.Int32(int32(vv)) - } - - if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Restore = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.StorageClass = types.StorageClass(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-tagging-count"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 32) - if err != nil { - return err - } - v.TagCount = ptr.Int32(int32(vv)) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.WebsiteRedirectLocation = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetObjectOutput(v *GetObjectOutput, body io.ReadCloser) error { - if v == nil { - return fmt.Errorf("unsupported deserialization of nil %T", v) - } - v.Body = body - return nil -} - -type awsRestxml_deserializeOpGetObjectAcl struct { -} - -func (*awsRestxml_deserializeOpGetObjectAcl) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObjectAcl(response, &metadata) - } - output := &GetObjectAclOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetObjectAclOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchKey", errorCode): - return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(v *GetObjectAclOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetObjectAclOutput(v **GetObjectAclOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetObjectAclOutput - if *v == nil { - sv = &GetObjectAclOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessControlList", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetObjectAttributes struct { -} - -func (*awsRestxml_deserializeOpGetObjectAttributes) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObjectAttributes(response, &metadata) - } - output := &GetObjectAttributesOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObjectAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchKey", errorCode): - return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(v *GetObjectAttributesOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.DeleteMarker = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) - if err != nil { - return err - } - v.LastModified = ptr.Time(t) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(v **GetObjectAttributesOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetObjectAttributesOutput - if *v == nil { - sv = &GetObjectAttributesOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Checksum", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentChecksum(&sv.Checksum, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("ObjectParts", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGetObjectAttributesParts(&sv.ObjectParts, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ObjectSize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.ObjectSize = ptr.Int64(i64) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.StorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetObjectLegalHold struct { -} - -func (*awsRestxml_deserializeOpGetObjectLegalHold) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObjectLegalHold(response, &metadata) - } - output := &GetObjectLegalHoldOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentObjectLockLegalHold(&output.LegalHold, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetObjectLegalHoldOutput(v **GetObjectLegalHoldOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetObjectLegalHoldOutput - if *v == nil { - sv = &GetObjectLegalHoldOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("LegalHold", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectLockLegalHold(&sv.LegalHold, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetObjectLockConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetObjectLockConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response, &metadata) - } - output := &GetObjectLockConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentObjectLockConfiguration(&output.ObjectLockConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetObjectLockConfigurationOutput(v **GetObjectLockConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetObjectLockConfigurationOutput - if *v == nil { - sv = &GetObjectLockConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ObjectLockConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectLockConfiguration(&sv.ObjectLockConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetObjectRetention struct { -} - -func (*awsRestxml_deserializeOpGetObjectRetention) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObjectRetention(response, &metadata) - } - output := &GetObjectRetentionOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentObjectLockRetention(&output.Retention, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetObjectRetentionOutput(v **GetObjectRetentionOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetObjectRetentionOutput - if *v == nil { - sv = &GetObjectRetentionOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Retention", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectLockRetention(&sv.Retention, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetObjectTagging struct { -} - -func (*awsRestxml_deserializeOpGetObjectTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObjectTagging(response, &metadata) - } - output := &GetObjectTaggingOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(v *GetObjectTaggingOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(v **GetObjectTaggingOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetObjectTaggingOutput - if *v == nil { - sv = &GetObjectTaggingOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("TagSet", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetObjectTorrent struct { -} - -func (*awsRestxml_deserializeOpGetObjectTorrent) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetObjectTorrent(response, &metadata) - } - output := &GetObjectTorrentOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - err = awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(output, response.Body) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetObjectTorrent(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(v *GetObjectTorrentOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(v *GetObjectTorrentOutput, body io.ReadCloser) error { - if v == nil { - return fmt.Errorf("unsupported deserialization of nil %T", v) - } - v.Body = body - return nil -} - -type awsRestxml_deserializeOpGetPublicAccessBlock struct { -} - -func (*awsRestxml_deserializeOpGetPublicAccessBlock) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetPublicAccessBlock(response, &metadata) - } - output := &GetPublicAccessBlockOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&output.PublicAccessBlockConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetPublicAccessBlockOutput(v **GetPublicAccessBlockOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetPublicAccessBlockOutput - if *v == nil { - sv = &GetPublicAccessBlockOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PublicAccessBlockConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&sv.PublicAccessBlockConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpHeadBucket struct { -} - -func (*awsRestxml_deserializeOpHeadBucket) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorHeadBucket(response, &metadata) - } - output := &HeadBucketOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NotFound", errorCode): - return awsRestxml_deserializeErrorNotFound(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(v *HeadBucketOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-access-point-alias"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.AccessPointAlias = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-bucket-arn"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.BucketArn = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-bucket-location-name"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.BucketLocationName = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-bucket-location-type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.BucketLocationType = types.LocationType(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-bucket-region"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.BucketRegion = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpHeadObject struct { -} - -func (*awsRestxml_deserializeOpHeadObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorHeadObject(response, &metadata) - } - output := &HeadObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorHeadObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NotFound", errorCode): - return awsRestxml_deserializeErrorNotFound(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.AcceptRanges = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-archive-status"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ArchiveStatus = types.ArchiveStatus(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.CacheControl = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32C = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC64NVME = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA1 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA256 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumType = types.ChecksumType(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentDisposition = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentEncoding = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentLanguage = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 64) - if err != nil { - return err - } - v.ContentLength = ptr.Int64(vv) - } - - if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentRange = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ContentType = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.DeleteMarker = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ETag = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - deserOverride, err := deserializeS3Expires(headerValues[0]) - if err != nil { - return err - } - v.Expires = deserOverride - - } - - if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ExpiresString = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) - if err != nil { - return err - } - v.LastModified = ptr.Time(t) - } - - for headerKey, headerValues := range response.Header { - if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { - if v.Metadata == nil { - v.Metadata = map[string]string{} - } - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] - } - } - - if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 32) - if err != nil { - return err - } - v.MissingMeta = ptr.Int32(int32(vv)) - } - - if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseDateTime(headerValues[0]) - if err != nil { - return err - } - v.ObjectLockRetainUntilDate = ptr.Time(t) - } - - if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 32) - if err != nil { - return err - } - v.PartsCount = ptr.Int32(int32(vv)) - } - - if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Restore = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.StorageClass = types.StorageClass(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-tagging-count"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 32) - if err != nil { - return err - } - v.TagCount = ptr.Int32(int32(vv)) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.WebsiteRedirectLocation = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpListBucketAnalyticsConfigurations struct { -} - -func (*awsRestxml_deserializeOpListBucketAnalyticsConfigurations) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response, &metadata) - } - output := &ListBucketAnalyticsConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v **ListBucketAnalyticsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketAnalyticsConfigurationsOutput - if *v == nil { - sv = &ListBucketAnalyticsConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(&sv.AnalyticsConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations struct { -} - -func (*awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response, &metadata) - } - output := &ListBucketIntelligentTieringConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(v **ListBucketIntelligentTieringConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketIntelligentTieringConfigurationsOutput - if *v == nil { - sv = &ListBucketIntelligentTieringConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(&sv.IntelligentTieringConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBucketInventoryConfigurations struct { -} - -func (*awsRestxml_deserializeOpListBucketInventoryConfigurations) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response, &metadata) - } - output := &ListBucketInventoryConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v **ListBucketInventoryConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketInventoryConfigurationsOutput - if *v == nil { - sv = &ListBucketInventoryConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("InventoryConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(&sv.InventoryConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBucketMetricsConfigurations struct { -} - -func (*awsRestxml_deserializeOpListBucketMetricsConfigurations) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response, &metadata) - } - output := &ListBucketMetricsConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **ListBucketMetricsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketMetricsConfigurationsOutput - if *v == nil { - sv = &ListBucketMetricsConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("MetricsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(&sv.MetricsConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBuckets struct { -} - -func (*awsRestxml_deserializeOpListBuckets) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBuckets(response, &metadata) - } - output := &ListBucketsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketsOutput - if *v == nil { - sv = &ListBucketsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Buckets", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListDirectoryBuckets struct { -} - -func (*awsRestxml_deserializeOpListDirectoryBuckets) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListDirectoryBuckets(response, &metadata) - } - output := &ListDirectoryBucketsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListDirectoryBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(v **ListDirectoryBucketsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListDirectoryBucketsOutput - if *v == nil { - sv = &ListDirectoryBucketsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Buckets", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListMultipartUploads struct { -} - -func (*awsRestxml_deserializeOpListMultipartUploads) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata) - } - output := &ListMultipartUploadsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(v *ListMultipartUploadsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListMultipartUploadsOutput - if *v == nil { - sv = &ListMultipartUploadsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("KeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("MaxUploads", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxUploads = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("NextKeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextKeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("NextUploadIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextUploadIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("UploadIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UploadIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Upload", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(&sv.Uploads, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListObjects struct { -} - -func (*awsRestxml_deserializeOpListObjects) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListObjects(response, &metadata) - } - output := &ListObjectsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListObjectsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListObjectsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchBucket", errorCode): - return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListObjectsOutput(v *ListObjectsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListObjectsOutput - if *v == nil { - sv = &ListObjectsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Contents", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("Marker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Marker = ptr.String(xtv) - } - - case strings.EqualFold("MaxKeys", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxKeys = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - case strings.EqualFold("NextMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextMarker = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListObjectsV2 struct { -} - -func (*awsRestxml_deserializeOpListObjectsV2) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListObjectsV2(response, &metadata) - } - output := &ListObjectsV2Output{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListObjectsV2Output(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchBucket", errorCode): - return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(v *ListObjectsV2Output, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListObjectsV2Output - if *v == nil { - sv = &ListObjectsV2Output{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Contents", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("KeyCount", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.KeyCount = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("MaxKeys", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxKeys = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("StartAfter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StartAfter = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListObjectVersions struct { -} - -func (*awsRestxml_deserializeOpListObjectVersions) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListObjectVersions(response, &metadata) - } - output := &ListObjectVersionsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListObjectVersionsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(v *ListObjectVersionsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListObjectVersionsOutput - if *v == nil { - sv = &ListObjectVersionsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("DeleteMarker", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(&sv.DeleteMarkers, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("KeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("MaxKeys", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxKeys = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - case strings.EqualFold("NextKeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextKeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("NextVersionIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextVersionIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("VersionIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Version", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectVersionListUnwrapped(&sv.Versions, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListParts struct { -} - -func (*awsRestxml_deserializeOpListParts) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListParts(response, &metadata) - } - output := &ListPartsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListPartsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListPartsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListParts(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListPartsOutput(v *ListPartsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) - if err != nil { - return err - } - v.AbortDate = ptr.Time(t) - } - - if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.AbortRuleId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListPartsOutput - if *v == nil { - sv = &ListPartsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("Initiator", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("MaxParts", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxParts = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("NextPartNumberMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextPartNumberMarker = ptr.String(xtv) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PartNumberMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.PartNumberMarker = ptr.String(xtv) - } - - case strings.EqualFold("Part", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPartsUnwrapped(&sv.Parts, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.StorageClass(xtv) - } - - case strings.EqualFold("UploadId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UploadId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpPutBucketAccelerateConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response, &metadata) - } - output := &PutBucketAccelerateConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketAcl struct { -} - -func (*awsRestxml_deserializeOpPutBucketAcl) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketAcl(response, &metadata) - } - output := &PutBucketAclOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response, &metadata) - } - output := &PutBucketAnalyticsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketCors struct { -} - -func (*awsRestxml_deserializeOpPutBucketCors) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketCors(response, &metadata) - } - output := &PutBucketCorsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketEncryption struct { -} - -func (*awsRestxml_deserializeOpPutBucketEncryption) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketEncryption(response, &metadata) - } - output := &PutBucketEncryptionOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response, &metadata) - } - output := &PutBucketIntelligentTieringConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketInventoryConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketInventoryConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response, &metadata) - } - output := &PutBucketInventoryConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketLifecycleConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketLifecycleConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response, &metadata) - } - output := &PutBucketLifecycleConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(v *PutBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutBucketLogging struct { -} - -func (*awsRestxml_deserializeOpPutBucketLogging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketLogging(response, &metadata) - } - output := &PutBucketLoggingOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketMetricsConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketMetricsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response, &metadata) - } - output := &PutBucketMetricsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketNotificationConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketNotificationConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response, &metadata) - } - output := &PutBucketNotificationConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketOwnershipControls struct { -} - -func (*awsRestxml_deserializeOpPutBucketOwnershipControls) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response, &metadata) - } - output := &PutBucketOwnershipControlsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketPolicy struct { -} - -func (*awsRestxml_deserializeOpPutBucketPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketPolicy(response, &metadata) - } - output := &PutBucketPolicyOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketReplication struct { -} - -func (*awsRestxml_deserializeOpPutBucketReplication) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketReplication(response, &metadata) - } - output := &PutBucketReplicationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketRequestPayment struct { -} - -func (*awsRestxml_deserializeOpPutBucketRequestPayment) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketRequestPayment(response, &metadata) - } - output := &PutBucketRequestPaymentOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketTagging struct { -} - -func (*awsRestxml_deserializeOpPutBucketTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketTagging(response, &metadata) - } - output := &PutBucketTaggingOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketVersioning struct { -} - -func (*awsRestxml_deserializeOpPutBucketVersioning) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketVersioning(response, &metadata) - } - output := &PutBucketVersioningOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketWebsite struct { -} - -func (*awsRestxml_deserializeOpPutBucketWebsite) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketWebsite(response, &metadata) - } - output := &PutBucketWebsiteOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutObject struct { -} - -func (*awsRestxml_deserializeOpPutObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObject(response, &metadata) - } - output := &PutObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("EncryptionTypeMismatch", errorCode): - return awsRestxml_deserializeErrorEncryptionTypeMismatch(response, errorBody) - - case strings.EqualFold("InvalidRequest", errorCode): - return awsRestxml_deserializeErrorInvalidRequest(response, errorBody) - - case strings.EqualFold("InvalidWriteOffset", errorCode): - return awsRestxml_deserializeErrorInvalidWriteOffset(response, errorBody) - - case strings.EqualFold("TooManyParts", errorCode): - return awsRestxml_deserializeErrorTooManyParts(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32C = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC64NVME = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA1 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA256 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumType = types.ChecksumType(headerValues[0]) - } - - if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ETag = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-object-size"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 64) - if err != nil { - return err - } - v.Size = ptr.Int64(vv) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectAcl struct { -} - -func (*awsRestxml_deserializeOpPutObjectAcl) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectAcl(response, &metadata) - } - output := &PutObjectAclOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchKey", errorCode): - return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(v *PutObjectAclOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectLegalHold struct { -} - -func (*awsRestxml_deserializeOpPutObjectLegalHold) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectLegalHold(response, &metadata) - } - output := &PutObjectLegalHoldOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(v *PutObjectLegalHoldOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectLockConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutObjectLockConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response, &metadata) - } - output := &PutObjectLockConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(v *PutObjectLockConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectRetention struct { -} - -func (*awsRestxml_deserializeOpPutObjectRetention) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectRetention(response, &metadata) - } - output := &PutObjectRetentionOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(v *PutObjectRetentionOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectTagging struct { -} - -func (*awsRestxml_deserializeOpPutObjectTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectTagging(response, &metadata) - } - output := &PutObjectTaggingOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(v *PutObjectTaggingOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutPublicAccessBlock struct { -} - -func (*awsRestxml_deserializeOpPutPublicAccessBlock) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutPublicAccessBlock(response, &metadata) - } - output := &PutPublicAccessBlockOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpRenameObject struct { -} - -func (*awsRestxml_deserializeOpRenameObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpRenameObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorRenameObject(response, &metadata) - } - output := &RenameObjectOutput{} - out.Result = output - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorRenameObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("IdempotencyParameterMismatch", errorCode): - return awsRestxml_deserializeErrorIdempotencyParameterMismatch(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpRestoreObject struct { -} - -func (*awsRestxml_deserializeOpRestoreObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorRestoreObject(response, &metadata) - } - output := &RestoreObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorRestoreObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ObjectAlreadyInActiveTierError", errorCode): - return awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(v *RestoreObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-restore-output-path"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RestoreOutputPath = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpSelectObjectContent struct { -} - -func (*awsRestxml_deserializeOpSelectObjectContent) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorSelectObjectContent(response, &metadata) - } - output := &SelectObjectContentOutput{} - out.Result = output - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorSelectObjectContent(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpUpdateBucketMetadataInventoryTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpUpdateBucketMetadataInventoryTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpUpdateBucketMetadataInventoryTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorUpdateBucketMetadataInventoryTableConfiguration(response, &metadata) - } - output := &UpdateBucketMetadataInventoryTableConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorUpdateBucketMetadataInventoryTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpUpdateBucketMetadataJournalTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpUpdateBucketMetadataJournalTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpUpdateBucketMetadataJournalTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorUpdateBucketMetadataJournalTableConfiguration(response, &metadata) - } - output := &UpdateBucketMetadataJournalTableConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorUpdateBucketMetadataJournalTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpUploadPart struct { -} - -func (*awsRestxml_deserializeOpUploadPart) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorUploadPart(response, &metadata) - } - output := &UploadPartOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsUploadPartOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorUploadPart(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32C = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC64NVME = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA1 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA256 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ETag = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpUploadPartCopy struct { -} - -func (*awsRestxml_deserializeOpUploadPartCopy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorUploadPartCopy(response, &metadata) - } - output := &UploadPartCopyOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentCopyPartResult(&output.CopyPartResult, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorUploadPartCopy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.CopySourceVersionId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentUploadPartCopyOutput(v **UploadPartCopyOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *UploadPartCopyOutput - if *v == nil { - sv = &UploadPartCopyOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CopyPartResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCopyPartResult(&sv.CopyPartResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpWriteGetObjectResponse struct { -} - -func (*awsRestxml_deserializeOpWriteGetObjectResponse) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorWriteGetObjectResponse(response, &metadata) - } - output := &WriteGetObjectResponseOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorWriteGetObjectResponse(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeEventStreamSelectObjectContentEventStream(v *types.SelectObjectContentEventStream, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - eventType := msg.Headers.Get(eventstreamapi.EventTypeHeader) - if eventType == nil { - return fmt.Errorf("%s event header not present", eventstreamapi.EventTypeHeader) - } - - switch { - case strings.EqualFold("Cont", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberCont{} - if err := awsRestxml_deserializeEventMessageContinuationEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("End", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberEnd{} - if err := awsRestxml_deserializeEventMessageEndEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("Progress", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberProgress{} - if err := awsRestxml_deserializeEventMessageProgressEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("Records", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberRecords{} - if err := awsRestxml_deserializeEventMessageRecordsEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("Stats", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberStats{} - if err := awsRestxml_deserializeEventMessageStatsEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - default: - buffer := bytes.NewBuffer(nil) - eventstream.NewEncoder().Encode(buffer, *msg) - *v = &types.UnknownUnionMember{ - Tag: eventType.String(), - Value: buffer.Bytes(), - } - return nil - - } -} - -func awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg *eventstream.Message) error { - exceptionType := msg.Headers.Get(eventstreamapi.ExceptionTypeHeader) - if exceptionType == nil { - return fmt.Errorf("%s event header not present", eventstreamapi.ExceptionTypeHeader) - } - - switch { - default: - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(br, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - errorComponents, err := awsxml.GetErrorResponseComponents(br, true) - if err != nil { - return err - } - errorCode := "UnknownError" - errorMessage := errorCode - if ev := exceptionType.String(); len(ev) > 0 { - errorCode = ev - } else if ev := errorComponents.Code; len(ev) > 0 { - errorCode = ev - } - if ev := errorComponents.Message; len(ev) > 0 { - errorMessage = ev - } - return &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - - } -} - -func awsRestxml_deserializeEventMessageRecordsEvent(v *types.RecordsEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - if msg.Payload != nil { - bsv := make([]byte, len(msg.Payload)) - copy(bsv, msg.Payload) - - v.Payload = bsv - } - return nil -} - -func awsRestxml_deserializeEventMessageStatsEvent(v *types.StatsEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentStats(&v.Details, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeEventMessageProgressEvent(v *types.ProgressEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentProgress(&v.Details, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeEventMessageContinuationEvent(v *types.ContinuationEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentContinuationEvent(&v, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeEventMessageEndEvent(v *types.EndEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentEndEvent(&v, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeDocumentContinuationEvent(v **types.ContinuationEvent, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ContinuationEvent - if *v == nil { - sv = &types.ContinuationEvent{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEndEvent(v **types.EndEvent, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.EndEvent - if *v == nil { - sv = &types.EndEvent{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Progress - if *v == nil { - sv = &types.Progress{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BytesProcessed", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesProcessed = ptr.Int64(i64) - } - - case strings.EqualFold("BytesReturned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesReturned = ptr.Int64(i64) - } - - case strings.EqualFold("BytesScanned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesScanned = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Stats - if *v == nil { - sv = &types.Stats{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BytesProcessed", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesProcessed = ptr.Int64(i64) - } - - case strings.EqualFold("BytesReturned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesReturned = ptr.Int64(i64) - } - - case strings.EqualFold("BytesScanned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesScanned = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeErrorBucketAlreadyExists(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.BucketAlreadyExists{} - return output -} - -func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.BucketAlreadyOwnedByYou{} - return output -} - -func awsRestxml_deserializeErrorEncryptionTypeMismatch(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.EncryptionTypeMismatch{} - return output -} - -func awsRestxml_deserializeErrorIdempotencyParameterMismatch(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.IdempotencyParameterMismatch{} - return output -} - -func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidObjectState{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentInvalidObjectState(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsRestxml_deserializeErrorInvalidRequest(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequest{} - return output -} - -func awsRestxml_deserializeErrorInvalidWriteOffset(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidWriteOffset{} - return output -} - -func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchBucket{} - return output -} - -func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchKey{} - return output -} - -func awsRestxml_deserializeErrorNoSuchUpload(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchUpload{} - return output -} - -func awsRestxml_deserializeErrorNotFound(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NotFound{} - return output -} - -func awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ObjectAlreadyInActiveTierError{} - return output -} - -func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ObjectNotInActiveTierError{} - return output -} - -func awsRestxml_deserializeErrorTooManyParts(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.TooManyParts{} - return output -} - -func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AbortIncompleteMultipartUpload - if *v == nil { - sv = &types.AbortIncompleteMultipartUpload{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DaysAfterInitiation", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.DaysAfterInitiation = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAccessControlTranslation(v **types.AccessControlTranslation, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AccessControlTranslation - if *v == nil { - sv = &types.AccessControlTranslation{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Owner", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Owner = types.OwnerOverride(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = xtv - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - switch { - default: - var mv string - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAllowedMethods(v *[]string, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = xtv - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - switch { - default: - var mv string - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAllowedOrigins(v *[]string, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = xtv - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - switch { - default: - var mv string - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAnalyticsAndOperator(v **types.AnalyticsAndOperator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsAndOperator - if *v == nil { - sv = &types.AnalyticsAndOperator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsConfiguration(v **types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsConfiguration - if *v == nil { - sv = &types.AnalyticsConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("StorageClassAnalysis", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentStorageClassAnalysis(&sv.StorageClassAnalysis, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsConfigurationList(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.AnalyticsConfiguration - if *v == nil { - sv = make([]types.AnalyticsConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.AnalyticsConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.AnalyticsConfiguration - if *v == nil { - sv = make([]types.AnalyticsConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.AnalyticsConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAnalyticsExportDestination(v **types.AnalyticsExportDestination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsExportDestination - if *v == nil { - sv = &types.AnalyticsExportDestination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("S3BucketDestination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsFilter(v *types.AnalyticsFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var uv types.AnalyticsFilter - var memberFound bool - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("And", t.Name.Local): - var mv types.AnalyticsAndOperator - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentAnalyticsAndOperator(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - uv = &types.AnalyticsFilterMemberAnd{Value: mv} - memberFound = true - - case strings.EqualFold("Prefix", t.Name.Local): - var mv string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - uv = &types.AnalyticsFilterMemberPrefix{Value: mv} - memberFound = true - - case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - uv = &types.AnalyticsFilterMemberTag{Value: mv} - memberFound = true - - default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true - - } - decoder = originalDecoder - } - *v = uv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(v **types.AnalyticsS3BucketDestination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsS3BucketDestination - if *v == nil { - sv = &types.AnalyticsS3BucketDestination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("BucketAccountId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.BucketAccountId = ptr.String(xtv) - } - - case strings.EqualFold("Format", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Format = types.AnalyticsS3ExportFileFormat(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Bucket - if *v == nil { - sv = &types.Bucket{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BucketArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.BucketArn = ptr.String(xtv) - } - - case strings.EqualFold("BucketRegion", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.BucketRegion = ptr.String(xtv) - } - - case strings.EqualFold("CreationDate", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.CreationDate = ptr.Time(t) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucketAlreadyExists(v **types.BucketAlreadyExists, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.BucketAlreadyExists - if *v == nil { - sv = &types.BucketAlreadyExists{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucketAlreadyOwnedByYou(v **types.BucketAlreadyOwnedByYou, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.BucketAlreadyOwnedByYou - if *v == nil { - sv = &types.BucketAlreadyOwnedByYou{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBuckets(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Bucket - if *v == nil { - sv = make([]types.Bucket, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("Bucket", t.Name.Local): - var col types.Bucket - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucketsUnwrapped(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { - var sv []types.Bucket - if *v == nil { - sv = make([]types.Bucket, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Bucket - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Checksum - if *v == nil { - sv = &types.Checksum{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentChecksumAlgorithmList(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ChecksumAlgorithm - if *v == nil { - sv = make([]types.ChecksumAlgorithm, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ChecksumAlgorithm - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = types.ChecksumAlgorithm(xtv) - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { - var sv []types.ChecksumAlgorithm - if *v == nil { - sv = make([]types.ChecksumAlgorithm, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ChecksumAlgorithm - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = types.ChecksumAlgorithm(xtv) - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentCommonPrefix(v **types.CommonPrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CommonPrefix - if *v == nil { - sv = &types.CommonPrefix{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCommonPrefixList(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.CommonPrefix - if *v == nil { - sv = make([]types.CommonPrefix, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.CommonPrefix - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { - var sv []types.CommonPrefix - if *v == nil { - sv = make([]types.CommonPrefix, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.CommonPrefix - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentCondition(v **types.Condition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Condition - if *v == nil { - sv = &types.Condition{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("HttpErrorCodeReturnedEquals", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HttpErrorCodeReturnedEquals = ptr.String(xtv) - } - - case strings.EqualFold("KeyPrefixEquals", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyPrefixEquals = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CopyObjectResult - if *v == nil { - sv = &types.CopyObjectResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CopyPartResult - if *v == nil { - sv = &types.CopyPartResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CORSRule - if *v == nil { - sv = &types.CORSRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AllowedHeader", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(&sv.AllowedHeaders, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("AllowedMethod", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(&sv.AllowedMethods, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("AllowedOrigin", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(&sv.AllowedOrigins, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ExposeHeader", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentExposeHeadersUnwrapped(&sv.ExposeHeaders, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - case strings.EqualFold("MaxAgeSeconds", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxAgeSeconds = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCORSRules(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.CORSRule - if *v == nil { - sv = make([]types.CORSRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.CORSRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCORSRulesUnwrapped(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { - var sv []types.CORSRule - if *v == nil { - sv = make([]types.CORSRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.CORSRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DefaultRetention - if *v == nil { - sv = &types.DefaultRetention{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Mode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Mode = types.ObjectLockRetentionMode(xtv) - } - - case strings.EqualFold("Years", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Years = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DeletedObject - if *v == nil { - sv = &types.DeletedObject{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DeleteMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val) - } - sv.DeleteMarker = ptr.Bool(xtv) - } - - case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DeleteMarkerVersionId = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("VersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeletedObjects(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.DeletedObject - if *v == nil { - sv = make([]types.DeletedObject, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.DeletedObject - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { - var sv []types.DeletedObject - if *v == nil { - sv = make([]types.DeletedObject, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.DeletedObject - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DeleteMarkerEntry - if *v == nil { - sv = &types.DeleteMarkerEntry{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IsLatest", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) - } - sv.IsLatest = ptr.Bool(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("VersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeleteMarkerReplication(v **types.DeleteMarkerReplication, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DeleteMarkerReplication - if *v == nil { - sv = &types.DeleteMarkerReplication{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.DeleteMarkerReplicationStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeleteMarkers(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.DeleteMarkerEntry - if *v == nil { - sv = make([]types.DeleteMarkerEntry, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.DeleteMarkerEntry - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { - var sv []types.DeleteMarkerEntry - if *v == nil { - sv = make([]types.DeleteMarkerEntry, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.DeleteMarkerEntry - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentDestination(v **types.Destination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Destination - if *v == nil { - sv = &types.Destination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessControlTranslation", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAccessControlTranslation(&sv.AccessControlTranslation, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Account", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Account = ptr.String(xtv) - } - - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("EncryptionConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Metrics", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetrics(&sv.Metrics, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ReplicationTime", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationTime(&sv.ReplicationTime, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.StorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDestinationResult(v **types.DestinationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DestinationResult - if *v == nil { - sv = &types.DestinationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("TableBucketArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableBucketArn = ptr.String(xtv) - } - - case strings.EqualFold("TableBucketType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableBucketType = types.S3TablesBucketType(xtv) - } - - case strings.EqualFold("TableNamespace", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableNamespace = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.EncryptionConfiguration - if *v == nil { - sv = &types.EncryptionConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ReplicaKmsKeyID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ReplicaKmsKeyID = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEncryptionTypeMismatch(v **types.EncryptionTypeMismatch, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.EncryptionTypeMismatch - if *v == nil { - sv = &types.EncryptionTypeMismatch{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Error - if *v == nil { - sv = &types.Error{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Code", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Code = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("Message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - case strings.EqualFold("VersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentErrorDetails(v **types.ErrorDetails, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ErrorDetails - if *v == nil { - sv = &types.ErrorDetails{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ErrorCode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ErrorCode = ptr.String(xtv) - } - - case strings.EqualFold("ErrorMessage", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ErrorMessage = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ErrorDocument - if *v == nil { - sv = &types.ErrorDocument{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentErrors(v *[]types.Error, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Error - if *v == nil { - sv = make([]types.Error, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Error - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentErrorsUnwrapped(v *[]types.Error, decoder smithyxml.NodeDecoder) error { - var sv []types.Error - if *v == nil { - sv = make([]types.Error, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Error - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentEventBridgeConfiguration(v **types.EventBridgeConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.EventBridgeConfiguration - if *v == nil { - sv = &types.EventBridgeConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEventList(v *[]types.Event, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Event - if *v == nil { - sv = make([]types.Event, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Event - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = types.Event(xtv) - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEventListUnwrapped(v *[]types.Event, decoder smithyxml.NodeDecoder) error { - var sv []types.Event - if *v == nil { - sv = make([]types.Event, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Event - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = types.Event(xtv) - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentExistingObjectReplication(v **types.ExistingObjectReplication, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ExistingObjectReplication - if *v == nil { - sv = &types.ExistingObjectReplication{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ExistingObjectReplicationStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentExposeHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = xtv - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentExposeHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - switch { - default: - var mv string - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentFilterRule(v **types.FilterRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.FilterRule - if *v == nil { - sv = &types.FilterRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = types.FilterRuleName(xtv) - } - - case strings.EqualFold("Value", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Value = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentFilterRuleList(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.FilterRule - if *v == nil { - sv = make([]types.FilterRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.FilterRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { - var sv []types.FilterRule - if *v == nil { - sv = make([]types.FilterRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.FilterRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentGetBucketMetadataConfigurationResult(v **types.GetBucketMetadataConfigurationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.GetBucketMetadataConfigurationResult - if *v == nil { - sv = &types.GetBucketMetadataConfigurationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("MetadataConfigurationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetadataConfigurationResult(&sv.MetadataConfigurationResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(v **types.GetBucketMetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.GetBucketMetadataTableConfigurationResult - if *v == nil { - sv = &types.GetBucketMetadataTableConfigurationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Error", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentErrorDetails(&sv.Error, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("MetadataTableConfigurationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetadataTableConfigurationResult(&sv.MetadataTableConfigurationResult, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.GetObjectAttributesParts - if *v == nil { - sv = &types.GetObjectAttributesParts{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("MaxParts", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxParts = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("NextPartNumberMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextPartNumberMarker = ptr.String(xtv) - } - - case strings.EqualFold("PartNumberMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.PartNumberMarker = ptr.String(xtv) - } - - case strings.EqualFold("Part", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPartsListUnwrapped(&sv.Parts, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PartsCount", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.TotalPartsCount = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentGrant(v **types.Grant, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Grant - if *v == nil { - sv = &types.Grant{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Grantee", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Permission", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Permission = types.Permission(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentGrantee(v **types.Grantee, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Grantee - if *v == nil { - sv = &types.Grantee{} - } else { - sv = *v - } - - for _, attr := range decoder.StartEl.Attr { - name := attr.Name.Local - if len(attr.Name.Space) != 0 { - name = attr.Name.Space + `:` + attr.Name.Local - } - switch { - case strings.EqualFold("xsi:type", name): - val := []byte(attr.Value) - { - xtv := string(val) - sv.Type = types.Type(xtv) - } - - } - } - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DisplayName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DisplayName = ptr.String(xtv) - } - - case strings.EqualFold("EmailAddress", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EmailAddress = ptr.String(xtv) - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - case strings.EqualFold("URI", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.URI = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentGrants(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Grant - if *v == nil { - sv = make([]types.Grant, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("Grant", t.Name.Local): - var col types.Grant - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentGrantsUnwrapped(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { - var sv []types.Grant - if *v == nil { - sv = make([]types.Grant, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Grant - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentIdempotencyParameterMismatch(v **types.IdempotencyParameterMismatch, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IdempotencyParameterMismatch - if *v == nil { - sv = &types.IdempotencyParameterMismatch{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentIndexDocument(v **types.IndexDocument, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IndexDocument - if *v == nil { - sv = &types.IndexDocument{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Suffix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Suffix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInitiator(v **types.Initiator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Initiator - if *v == nil { - sv = &types.Initiator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DisplayName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DisplayName = ptr.String(xtv) - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentIntelligentTieringAndOperator(v **types.IntelligentTieringAndOperator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IntelligentTieringAndOperator - if *v == nil { - sv = &types.IntelligentTieringAndOperator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentIntelligentTieringConfiguration(v **types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IntelligentTieringConfiguration - if *v == nil { - sv = &types.IntelligentTieringConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentIntelligentTieringFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.IntelligentTieringStatus(xtv) - } - - case strings.EqualFold("Tiering", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTieringListUnwrapped(&sv.Tierings, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentIntelligentTieringConfigurationList(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.IntelligentTieringConfiguration - if *v == nil { - sv = make([]types.IntelligentTieringConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.IntelligentTieringConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.IntelligentTieringConfiguration - if *v == nil { - sv = make([]types.IntelligentTieringConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.IntelligentTieringConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentIntelligentTieringFilter(v **types.IntelligentTieringFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IntelligentTieringFilter - if *v == nil { - sv = &types.IntelligentTieringFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("And", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentIntelligentTieringAndOperator(&sv.And, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectState, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InvalidObjectState - if *v == nil { - sv = &types.InvalidObjectState{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessTier", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessTier = types.IntelligentTieringAccessTier(xtv) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.StorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInvalidRequest(v **types.InvalidRequest, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InvalidRequest - if *v == nil { - sv = &types.InvalidRequest{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInvalidWriteOffset(v **types.InvalidWriteOffset, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InvalidWriteOffset - if *v == nil { - sv = &types.InvalidWriteOffset{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InventoryConfiguration - if *v == nil { - sv = &types.InventoryConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Destination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryDestination(&sv.Destination, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("IncludedObjectVersions", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.IncludedObjectVersions = types.InventoryIncludedObjectVersions(xtv) - } - - case strings.EqualFold("IsEnabled", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val) - } - sv.IsEnabled = ptr.Bool(xtv) - } - - case strings.EqualFold("OptionalFields", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryOptionalFields(&sv.OptionalFields, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Schedule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventorySchedule(&sv.Schedule, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryConfigurationList(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.InventoryConfiguration - if *v == nil { - sv = make([]types.InventoryConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.InventoryConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.InventoryConfiguration - if *v == nil { - sv = make([]types.InventoryConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.InventoryConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentInventoryDestination(v **types.InventoryDestination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InventoryDestination - if *v == nil { - sv = &types.InventoryDestination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("S3BucketDestination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryEncryption(v **types.InventoryEncryption, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InventoryEncryption - if *v == nil { - sv = &types.InventoryEncryption{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("SSE-KMS", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSSEKMS(&sv.SSEKMS, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("SSE-S3", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSSES3(&sv.SSES3, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryFilter(v **types.InventoryFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InventoryFilter - if *v == nil { - sv = &types.InventoryFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryOptionalFields(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.InventoryOptionalField - if *v == nil { - sv = make([]types.InventoryOptionalField, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("Field", t.Name.Local): - var col types.InventoryOptionalField - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = types.InventoryOptionalField(xtv) - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryOptionalFieldsUnwrapped(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { - var sv []types.InventoryOptionalField - if *v == nil { - sv = make([]types.InventoryOptionalField, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.InventoryOptionalField - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = types.InventoryOptionalField(xtv) - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentInventoryS3BucketDestination(v **types.InventoryS3BucketDestination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InventoryS3BucketDestination - if *v == nil { - sv = &types.InventoryS3BucketDestination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccountId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccountId = ptr.String(xtv) - } - - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("Encryption", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryEncryption(&sv.Encryption, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Format", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Format = types.InventoryFormat(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventorySchedule(v **types.InventorySchedule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InventorySchedule - if *v == nil { - sv = &types.InventorySchedule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Frequency", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Frequency = types.InventoryFrequency(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentInventoryTableConfigurationResult(v **types.InventoryTableConfigurationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InventoryTableConfigurationResult - if *v == nil { - sv = &types.InventoryTableConfigurationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ConfigurationState", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ConfigurationState = types.InventoryConfigurationState(xtv) - } - - case strings.EqualFold("Error", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentErrorDetails(&sv.Error, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("TableArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableArn = ptr.String(xtv) - } - - case strings.EqualFold("TableName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableName = ptr.String(xtv) - } - - case strings.EqualFold("TableStatus", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableStatus = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentJournalTableConfigurationResult(v **types.JournalTableConfigurationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.JournalTableConfigurationResult - if *v == nil { - sv = &types.JournalTableConfigurationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Error", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentErrorDetails(&sv.Error, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("RecordExpiration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRecordExpiration(&sv.RecordExpiration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("TableArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableArn = ptr.String(xtv) - } - - case strings.EqualFold("TableName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableName = ptr.String(xtv) - } - - case strings.EqualFold("TableStatus", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableStatus = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLambdaFunctionConfiguration(v **types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.LambdaFunctionConfiguration - if *v == nil { - sv = &types.LambdaFunctionConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Event", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("CloudFunction", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.LambdaFunctionArn = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLambdaFunctionConfigurationList(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.LambdaFunctionConfiguration - if *v == nil { - sv = make([]types.LambdaFunctionConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.LambdaFunctionConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.LambdaFunctionConfiguration - if *v == nil { - sv = make([]types.LambdaFunctionConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.LambdaFunctionConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpiration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.LifecycleExpiration - if *v == nil { - sv = &types.LifecycleExpiration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Date", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Date = ptr.Time(t) - } - - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val) - } - sv.ExpiredObjectDeleteMarker = ptr.Bool(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLifecycleRule(v **types.LifecycleRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.LifecycleRule - if *v == nil { - sv = &types.LifecycleRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AbortIncompleteMultipartUpload", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(&sv.AbortIncompleteMultipartUpload, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Expiration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLifecycleExpiration(&sv.Expiration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLifecycleRuleFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - case strings.EqualFold("NoncurrentVersionExpiration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentNoncurrentVersionExpiration(&sv.NoncurrentVersionExpiration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("NoncurrentVersionTransition", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(&sv.NoncurrentVersionTransitions, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ExpirationStatus(xtv) - } - - case strings.EqualFold("Transition", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTransitionListUnwrapped(&sv.Transitions, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleRuleAndOperator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.LifecycleRuleAndOperator - if *v == nil { - sv = &types.LifecycleRuleAndOperator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.ObjectSizeGreaterThan = ptr.Int64(i64) - } - - case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.ObjectSizeLessThan = ptr.Int64(i64) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLifecycleRuleFilter(v **types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.LifecycleRuleFilter - if *v == nil { - sv = &types.LifecycleRuleFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("And", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&sv.And, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.ObjectSizeGreaterThan = ptr.Int64(i64) - } - - case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.ObjectSizeLessThan = ptr.Int64(i64) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLifecycleRules(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.LifecycleRule - if *v == nil { - sv = make([]types.LifecycleRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.LifecycleRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { - var sv []types.LifecycleRule - if *v == nil { - sv = make([]types.LifecycleRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.LifecycleRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.LoggingEnabled - if *v == nil { - sv = &types.LoggingEnabled{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("TargetBucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TargetBucket = ptr.String(xtv) - } - - case strings.EqualFold("TargetGrants", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTargetGrants(&sv.TargetGrants, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("TargetObjectKeyFormat", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTargetObjectKeyFormat(&sv.TargetObjectKeyFormat, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("TargetPrefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TargetPrefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMetadataConfigurationResult(v **types.MetadataConfigurationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.MetadataConfigurationResult - if *v == nil { - sv = &types.MetadataConfigurationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DestinationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDestinationResult(&sv.DestinationResult, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("InventoryTableConfigurationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryTableConfigurationResult(&sv.InventoryTableConfigurationResult, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("JournalTableConfigurationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentJournalTableConfigurationResult(&sv.JournalTableConfigurationResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMetadataTableConfigurationResult(v **types.MetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.MetadataTableConfigurationResult - if *v == nil { - sv = &types.MetadataTableConfigurationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("S3TablesDestinationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentS3TablesDestinationResult(&sv.S3TablesDestinationResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Metrics - if *v == nil { - sv = &types.Metrics{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("EventThreshold", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.EventThreshold, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.MetricsStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMetricsAndOperator(v **types.MetricsAndOperator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.MetricsAndOperator - if *v == nil { - sv = &types.MetricsAndOperator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessPointArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessPointArn = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMetricsConfiguration(v **types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.MetricsConfiguration - if *v == nil { - sv = &types.MetricsConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetricsFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMetricsConfigurationList(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.MetricsConfiguration - if *v == nil { - sv = make([]types.MetricsConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.MetricsConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.MetricsConfiguration - if *v == nil { - sv = make([]types.MetricsConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.MetricsConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentMetricsFilter(v *types.MetricsFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var uv types.MetricsFilter - var memberFound bool - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessPointArn", t.Name.Local): - var mv string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - uv = &types.MetricsFilterMemberAccessPointArn{Value: mv} - memberFound = true - - case strings.EqualFold("And", t.Name.Local): - var mv types.MetricsAndOperator - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentMetricsAndOperator(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - uv = &types.MetricsFilterMemberAnd{Value: mv} - memberFound = true - - case strings.EqualFold("Prefix", t.Name.Local): - var mv string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - uv = &types.MetricsFilterMemberPrefix{Value: mv} - memberFound = true - - case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - uv = &types.MetricsFilterMemberTag{Value: mv} - memberFound = true - - default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true - - } - decoder = originalDecoder - } - *v = uv - return nil -} - -func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.MultipartUpload - if *v == nil { - sv = &types.MultipartUpload{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("Initiated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Initiated = ptr.Time(t) - } - - case strings.EqualFold("Initiator", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.StorageClass(xtv) - } - - case strings.EqualFold("UploadId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UploadId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMultipartUploadList(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.MultipartUpload - if *v == nil { - sv = make([]types.MultipartUpload, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.MultipartUpload - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error { - var sv []types.MultipartUpload - if *v == nil { - sv = make([]types.MultipartUpload, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.MultipartUpload - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.NoncurrentVersionExpiration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NoncurrentVersionExpiration - if *v == nil { - sv = &types.NoncurrentVersionExpiration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("NoncurrentDays", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.NoncurrentDays = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NoncurrentVersionTransition - if *v == nil { - sv = &types.NoncurrentVersionTransition{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("NoncurrentDays", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.NoncurrentDays = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.TransitionStorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNoncurrentVersionTransitionList(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.NoncurrentVersionTransition - if *v == nil { - sv = make([]types.NoncurrentVersionTransition, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.NoncurrentVersionTransition - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { - var sv []types.NoncurrentVersionTransition - if *v == nil { - sv = make([]types.NoncurrentVersionTransition, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.NoncurrentVersionTransition - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentNoSuchBucket(v **types.NoSuchBucket, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NoSuchBucket - if *v == nil { - sv = &types.NoSuchBucket{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNoSuchKey(v **types.NoSuchKey, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NoSuchKey - if *v == nil { - sv = &types.NoSuchKey{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNoSuchUpload(v **types.NoSuchUpload, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NoSuchUpload - if *v == nil { - sv = &types.NoSuchUpload{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNotFound(v **types.NotFound, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NotFound - if *v == nil { - sv = &types.NotFound{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNotificationConfigurationFilter(v **types.NotificationConfigurationFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NotificationConfigurationFilter - if *v == nil { - sv = &types.NotificationConfigurationFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("S3Key", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentS3KeyFilter(&sv.Key, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Object - if *v == nil { - sv = &types.Object{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("RestoreStatus", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.ObjectStorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectAlreadyInActiveTierError(v **types.ObjectAlreadyInActiveTierError, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectAlreadyInActiveTierError - if *v == nil { - sv = &types.ObjectAlreadyInActiveTierError{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectList(v *[]types.Object, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Object - if *v == nil { - sv = make([]types.Object, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Object - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectListUnwrapped(v *[]types.Object, decoder smithyxml.NodeDecoder) error { - var sv []types.Object - if *v == nil { - sv = make([]types.Object, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Object - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentObjectLockConfiguration(v **types.ObjectLockConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockConfiguration - if *v == nil { - sv = &types.ObjectLockConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ObjectLockEnabled", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ObjectLockEnabled = types.ObjectLockEnabled(xtv) - } - - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectLockRule(&sv.Rule, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectLockLegalHold(v **types.ObjectLockLegalHold, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockLegalHold - if *v == nil { - sv = &types.ObjectLockLegalHold{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ObjectLockLegalHoldStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectLockRetention(v **types.ObjectLockRetention, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockRetention - if *v == nil { - sv = &types.ObjectLockRetention{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Mode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Mode = types.ObjectLockRetentionMode(xtv) - } - - case strings.EqualFold("RetainUntilDate", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.RetainUntilDate = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectLockRule(v **types.ObjectLockRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockRule - if *v == nil { - sv = &types.ObjectLockRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DefaultRetention", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDefaultRetention(&sv.DefaultRetention, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectNotInActiveTierError(v **types.ObjectNotInActiveTierError, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectNotInActiveTierError - if *v == nil { - sv = &types.ObjectNotInActiveTierError{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectPart - if *v == nil { - sv = &types.ObjectPart{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("PartNumber", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PartNumber = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectVersion - if *v == nil { - sv = &types.ObjectVersion{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("IsLatest", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) - } - sv.IsLatest = ptr.Bool(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("RestoreStatus", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.ObjectVersionStorageClass(xtv) - } - - case strings.EqualFold("VersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectVersionList(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ObjectVersion - if *v == nil { - sv = make([]types.ObjectVersion, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ObjectVersion - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectVersionListUnwrapped(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error { - var sv []types.ObjectVersion - if *v == nil { - sv = make([]types.ObjectVersion, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ObjectVersion - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentOwner(v **types.Owner, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Owner - if *v == nil { - sv = &types.Owner{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DisplayName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DisplayName = ptr.String(xtv) - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControls(v **types.OwnershipControls, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.OwnershipControls - if *v == nil { - sv = &types.OwnershipControls{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControlsRule(v **types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.OwnershipControlsRule - if *v == nil { - sv = &types.OwnershipControlsRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ObjectOwnership", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ObjectOwnership = types.ObjectOwnership(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControlsRules(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.OwnershipControlsRule - if *v == nil { - sv = make([]types.OwnershipControlsRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.OwnershipControlsRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { - var sv []types.OwnershipControlsRule - if *v == nil { - sv = make([]types.OwnershipControlsRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.OwnershipControlsRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Part - if *v == nil { - sv = &types.Part{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("PartNumber", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PartNumber = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPartitionedPrefix(v **types.PartitionedPrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PartitionedPrefix - if *v == nil { - sv = &types.PartitionedPrefix{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PartitionDateSource", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.PartitionDateSource = types.PartitionDateSource(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentParts(v *[]types.Part, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Part - if *v == nil { - sv = make([]types.Part, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Part - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPartsUnwrapped(v *[]types.Part, decoder smithyxml.NodeDecoder) error { - var sv []types.Part - if *v == nil { - sv = make([]types.Part, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Part - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentPartsList(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ObjectPart - if *v == nil { - sv = make([]types.ObjectPart, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ObjectPart - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPartsListUnwrapped(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error { - var sv []types.ObjectPart - if *v == nil { - sv = make([]types.ObjectPart, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ObjectPart - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PolicyStatus - if *v == nil { - sv = &types.PolicyStatus{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IsPublic", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val) - } - sv.IsPublic = ptr.Bool(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.PublicAccessBlockConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PublicAccessBlockConfiguration - if *v == nil { - sv = &types.PublicAccessBlockConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BlockPublicAcls", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.BlockPublicAcls = ptr.Bool(xtv) - } - - case strings.EqualFold("BlockPublicPolicy", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.BlockPublicPolicy = ptr.Bool(xtv) - } - - case strings.EqualFold("IgnorePublicAcls", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.IgnorePublicAcls = ptr.Bool(xtv) - } - - case strings.EqualFold("RestrictPublicBuckets", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.RestrictPublicBuckets = ptr.Bool(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentQueueConfiguration(v **types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.QueueConfiguration - if *v == nil { - sv = &types.QueueConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Event", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("Queue", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.QueueArn = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentQueueConfigurationList(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.QueueConfiguration - if *v == nil { - sv = make([]types.QueueConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.QueueConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.QueueConfiguration - if *v == nil { - sv = make([]types.QueueConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.QueueConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentRecordExpiration(v **types.RecordExpiration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RecordExpiration - if *v == nil { - sv = &types.RecordExpiration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Expiration", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Expiration = types.ExpirationState(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRedirect(v **types.Redirect, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Redirect - if *v == nil { - sv = &types.Redirect{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("HostName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HostName = ptr.String(xtv) - } - - case strings.EqualFold("HttpRedirectCode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HttpRedirectCode = ptr.String(xtv) - } - - case strings.EqualFold("Protocol", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Protocol = types.Protocol(xtv) - } - - case strings.EqualFold("ReplaceKeyPrefixWith", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ReplaceKeyPrefixWith = ptr.String(xtv) - } - - case strings.EqualFold("ReplaceKeyWith", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ReplaceKeyWith = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRedirectAllRequestsTo(v **types.RedirectAllRequestsTo, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RedirectAllRequestsTo - if *v == nil { - sv = &types.RedirectAllRequestsTo{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("HostName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HostName = ptr.String(xtv) - } - - case strings.EqualFold("Protocol", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Protocol = types.Protocol(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicaModifications(v **types.ReplicaModifications, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicaModifications - if *v == nil { - sv = &types.ReplicaModifications{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ReplicaModificationsStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationConfiguration - if *v == nil { - sv = &types.ReplicationConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Role", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Role = ptr.String(xtv) - } - - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationRule - if *v == nil { - sv = &types.ReplicationRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DeleteMarkerReplication", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDeleteMarkerReplication(&sv.DeleteMarkerReplication, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Destination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDestination(&sv.Destination, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ExistingObjectReplication", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentExistingObjectReplication(&sv.ExistingObjectReplication, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationRuleFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Priority", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Priority = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("SourceSelectionCriteria", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSourceSelectionCriteria(&sv.SourceSelectionCriteria, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ReplicationRuleStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.ReplicationRuleAndOperator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationRuleAndOperator - if *v == nil { - sv = &types.ReplicationRuleAndOperator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRuleFilter(v **types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationRuleFilter - if *v == nil { - sv = &types.ReplicationRuleFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("And", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&sv.And, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRules(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ReplicationRule - if *v == nil { - sv = make([]types.ReplicationRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ReplicationRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRulesUnwrapped(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error { - var sv []types.ReplicationRule - if *v == nil { - sv = make([]types.ReplicationRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ReplicationRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentReplicationTime(v **types.ReplicationTime, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationTime - if *v == nil { - sv = &types.ReplicationTime{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ReplicationTimeStatus(xtv) - } - - case strings.EqualFold("Time", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.Time, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTimeValue, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationTimeValue - if *v == nil { - sv = &types.ReplicationTimeValue{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Minutes", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Minutes = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRestoreStatus(v **types.RestoreStatus, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RestoreStatus - if *v == nil { - sv = &types.RestoreStatus{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IsRestoreInProgress", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsRestoreInProgress to be of type *bool, got %T instead", val) - } - sv.IsRestoreInProgress = ptr.Bool(xtv) - } - - case strings.EqualFold("RestoreExpiryDate", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.RestoreExpiryDate = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRoutingRule(v **types.RoutingRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RoutingRule - if *v == nil { - sv = &types.RoutingRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Condition", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCondition(&sv.Condition, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Redirect", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRedirect(&sv.Redirect, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRoutingRules(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.RoutingRule - if *v == nil { - sv = make([]types.RoutingRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("RoutingRule", t.Name.Local): - var col types.RoutingRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRoutingRulesUnwrapped(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error { - var sv []types.RoutingRule - if *v == nil { - sv = make([]types.RoutingRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.RoutingRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.S3KeyFilter - if *v == nil { - sv = &types.S3KeyFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("FilterRule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentFilterRuleListUnwrapped(&sv.FilterRules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentS3TablesDestinationResult(v **types.S3TablesDestinationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.S3TablesDestinationResult - if *v == nil { - sv = &types.S3TablesDestinationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("TableArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableArn = ptr.String(xtv) - } - - case strings.EqualFold("TableBucketArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableBucketArn = ptr.String(xtv) - } - - case strings.EqualFold("TableName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableName = ptr.String(xtv) - } - - case strings.EqualFold("TableNamespace", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableNamespace = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ServerSideEncryptionByDefault - if *v == nil { - sv = &types.ServerSideEncryptionByDefault{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("KMSMasterKeyID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KMSMasterKeyID = ptr.String(xtv) - } - - case strings.EqualFold("SSEAlgorithm", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SSEAlgorithm = types.ServerSideEncryption(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(v **types.ServerSideEncryptionConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ServerSideEncryptionConfiguration - if *v == nil { - sv = &types.ServerSideEncryptionConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ServerSideEncryptionRule - if *v == nil { - sv = &types.ServerSideEncryptionRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ApplyServerSideEncryptionByDefault", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentServerSideEncryptionByDefault(&sv.ApplyServerSideEncryptionByDefault, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("BucketKeyEnabled", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val) - } - sv.BucketKeyEnabled = ptr.Bool(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionRules(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ServerSideEncryptionRule - if *v == nil { - sv = make([]types.ServerSideEncryptionRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ServerSideEncryptionRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { - var sv []types.ServerSideEncryptionRule - if *v == nil { - sv = make([]types.ServerSideEncryptionRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ServerSideEncryptionRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentSessionCredentials(v **types.SessionCredentials, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SessionCredentials - if *v == nil { - sv = &types.SessionCredentials{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessKeyId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessKeyId = ptr.String(xtv) - } - - case strings.EqualFold("Expiration", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Expiration = ptr.Time(t) - } - - case strings.EqualFold("SecretAccessKey", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SecretAccessKey = ptr.String(xtv) - } - - case strings.EqualFold("SessionToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SessionToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSimplePrefix(v **types.SimplePrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SimplePrefix - if *v == nil { - sv = &types.SimplePrefix{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SourceSelectionCriteria - if *v == nil { - sv = &types.SourceSelectionCriteria{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ReplicaModifications", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicaModifications(&sv.ReplicaModifications, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("SseKmsEncryptedObjects", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSseKmsEncryptedObjects(&sv.SseKmsEncryptedObjects, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSSEKMS(v **types.SSEKMS, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SSEKMS - if *v == nil { - sv = &types.SSEKMS{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("KeyId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSseKmsEncryptedObjects(v **types.SseKmsEncryptedObjects, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SseKmsEncryptedObjects - if *v == nil { - sv = &types.SseKmsEncryptedObjects{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.SseKmsEncryptedObjectsStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSSES3(v **types.SSES3, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SSES3 - if *v == nil { - sv = &types.SSES3{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentStorageClassAnalysis(v **types.StorageClassAnalysis, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.StorageClassAnalysis - if *v == nil { - sv = &types.StorageClassAnalysis{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DataExport", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(&sv.DataExport, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(v **types.StorageClassAnalysisDataExport, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.StorageClassAnalysisDataExport - if *v == nil { - sv = &types.StorageClassAnalysisDataExport{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Destination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsExportDestination(&sv.Destination, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("OutputSchemaVersion", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTag(v **types.Tag, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Tag - if *v == nil { - sv = &types.Tag{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("Value", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Value = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTagSet(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Tag - if *v == nil { - sv = make([]types.Tag, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("Tag", t.Name.Local): - var col types.Tag - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTagSetUnwrapped(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { - var sv []types.Tag - if *v == nil { - sv = make([]types.Tag, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Tag - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTargetGrant(v **types.TargetGrant, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TargetGrant - if *v == nil { - sv = &types.TargetGrant{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Grantee", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Permission", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Permission = types.BucketLogsPermission(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTargetGrants(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.TargetGrant - if *v == nil { - sv = make([]types.TargetGrant, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("Grant", t.Name.Local): - var col types.TargetGrant - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTargetGrantsUnwrapped(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { - var sv []types.TargetGrant - if *v == nil { - sv = make([]types.TargetGrant, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.TargetGrant - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTargetObjectKeyFormat(v **types.TargetObjectKeyFormat, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TargetObjectKeyFormat - if *v == nil { - sv = &types.TargetObjectKeyFormat{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PartitionedPrefix", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPartitionedPrefix(&sv.PartitionedPrefix, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("SimplePrefix", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSimplePrefix(&sv.SimplePrefix, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Tiering - if *v == nil { - sv = &types.Tiering{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessTier", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessTier = types.IntelligentTieringAccessTier(xtv) - } - - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTieringList(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Tiering - if *v == nil { - sv = make([]types.Tiering, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Tiering - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { - var sv []types.Tiering - if *v == nil { - sv = make([]types.Tiering, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Tiering - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTooManyParts(v **types.TooManyParts, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TooManyParts - if *v == nil { - sv = &types.TooManyParts{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TopicConfiguration - if *v == nil { - sv = &types.TopicConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Event", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("Topic", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TopicArn = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTopicConfigurationList(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.TopicConfiguration - if *v == nil { - sv = make([]types.TopicConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.TopicConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.TopicConfiguration - if *v == nil { - sv = make([]types.TopicConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.TopicConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Transition - if *v == nil { - sv = &types.Transition{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Date", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Date = ptr.Time(t) - } - - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.TransitionStorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTransitionList(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Transition - if *v == nil { - sv = make([]types.Transition, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Transition - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTransitionListUnwrapped(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { - var sv []types.Transition - if *v == nil { - sv = make([]types.Transition, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Transition - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go deleted file mode 100644 index d825a41a7795..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package s3 provides the API client, operations, and parameter types for Amazon -// Simple Storage Service. -package s3 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go deleted file mode 100644 index bb5f4cf47af2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go +++ /dev/null @@ -1,126 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - smithyauth "github.com/aws/smithy-go/auth" -) - -type endpointAuthResolver struct { - EndpointResolver EndpointResolverV2 -} - -var _ AuthSchemeResolver = (*endpointAuthResolver)(nil) - -func (r *endpointAuthResolver) ResolveAuthSchemes( - ctx context.Context, params *AuthResolverParameters, -) ( - []*smithyauth.Option, error, -) { - if params.endpointParams.Region == nil { - // #2502: We're correcting the endpoint binding behavior to treat empty - // Region as "unset" (nil), but auth resolution technically doesn't - // care and someone could be using V1 or non-default V2 endpoint - // resolution, both of which would bypass the required-region check. - // They shouldn't be broken because the region is technically required - // by this service's endpoint-based auth resolver, so we stub it here. - params.endpointParams.Region = aws.String("") - } - - opts, err := r.resolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - // canonicalize sigv4-s3express ID - for _, opt := range opts { - if opt.SchemeID == "sigv4-s3express" { - opt.SchemeID = "com.amazonaws.s3#sigv4express" - } - } - - // preserve pre-SRA behavior where everything technically had anonymous - return append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }), nil -} - -func (r *endpointAuthResolver) resolveAuthSchemes( - ctx context.Context, params *AuthResolverParameters, -) ( - []*smithyauth.Option, error, -) { - baseOpts, err := (&defaultAuthSchemeResolver{}).ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, fmt.Errorf("get base options: %w", err) - } - - endpt, err := r.EndpointResolver.ResolveEndpoint(ctx, *params.endpointParams) - if err != nil { - return nil, fmt.Errorf("resolve endpoint: %w", err) - } - - endptOpts, ok := smithyauth.GetAuthOptions(&endpt.Properties) - if !ok { - return baseOpts, nil - } - - // the list of options from the endpoint is authoritative, however, the - // modeled options have some properties that the endpoint ones don't, so we - // start from the latter and merge in - for _, endptOpt := range endptOpts { - if baseOpt := findScheme(baseOpts, endptOpt.SchemeID); baseOpt != nil { - rebaseProps(endptOpt, baseOpt) - } - } - - return endptOpts, nil -} - -// rebase the properties of dst, taking src as the base and overlaying those -// from dst -func rebaseProps(dst, src *smithyauth.Option) { - iprops, sprops := src.IdentityProperties, src.SignerProperties - - iprops.SetAll(&dst.IdentityProperties) - sprops.SetAll(&dst.SignerProperties) - - dst.IdentityProperties = iprops - dst.SignerProperties = sprops -} - -func findScheme(opts []*smithyauth.Option, schemeID string) *smithyauth.Option { - for _, opt := range opts { - if opt.SchemeID == schemeID { - return opt - } - } - return nil -} - -func finalizeServiceEndpointAuthResolver(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &endpointAuthResolver{ - EndpointResolver: options.EndpointResolverV2, - } -} - -func finalizeOperationEndpointAuthResolver(options *Options) { - resolver, ok := options.AuthSchemeResolver.(*endpointAuthResolver) - if !ok { - return - } - - if resolver.EndpointResolver == options.EndpointResolverV2 { - return - } - - options.AuthSchemeResolver = &endpointAuthResolver{ - EndpointResolver: options.EndpointResolverV2, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go deleted file mode 100644 index 9734372c4ed4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ /dev/null @@ -1,7644 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/endpoints/private/rulesfn" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. -func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "s3" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. -// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - - if options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - if options.UseDualstack { - options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled - } else { - options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_S3") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "S3", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The S3 bucket used to send the request. This is an optional parameter that will - // be set automatically for operations that are scoped to an S3 bucket. - // - // Parameter - // is required. - Bucket *string - - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string - - // When true, force a path-style endpoint to be used where the bucket name is part - // of the path. - // - // Defaults to false if no value is - // provided. - // - // AWS::S3::ForcePathStyle - ForcePathStyle *bool - - // When true, use S3 Accelerate. NOTE: Not all regions support S3 - // accelerate. - // - // Defaults to false if no value is provided. - // - // AWS::S3::Accelerate - Accelerate *bool - - // Whether the global endpoint should be used, rather then the regional endpoint - // for us-east-1. - // - // Defaults to false if no value is - // provided. - // - // AWS::S3::UseGlobalEndpoint - UseGlobalEndpoint *bool - - // Internal parameter to use object lambda endpoint for an operation (eg: - // WriteGetObjectResponse) - // - // Parameter is required. - UseObjectLambdaEndpoint *bool - - // The S3 Key used to send the request. This is an optional parameter that will be - // set automatically for operations that are scoped to an S3 Key. - // - // Parameter is - // required. - Key *string - - // The S3 Prefix used to send the request. This is an optional parameter that will - // be set automatically for operations that are scoped to an S3 Prefix. - // - // Parameter - // is required. - Prefix *string - - // The Copy Source used for Copy Object request. This is an optional parameter that - // will be set automatically for operations that are scoped to Copy - // Source. - // - // Parameter is required. - CopySource *string - - // Internal parameter to disable Access Point Buckets - // - // Parameter is required. - DisableAccessPoints *bool - - // Whether multi-region access points (MRAP) should be disabled. - // - // Defaults to false - // if no value is provided. - // - // AWS::S3::DisableMultiRegionAccessPoints - DisableMultiRegionAccessPoints *bool - - // When an Access Point ARN is provided and this flag is enabled, the SDK MUST use - // the ARN's region when constructing the endpoint instead of the client's - // configured region. - // - // Parameter is required. - // - // AWS::S3::UseArnRegion - UseArnRegion *bool - - // Internal parameter to indicate whether S3Express operation should use control - // plane, (ex. CreateBucket) - // - // Parameter is required. - UseS3ExpressControlEndpoint *bool - - // Parameter to indicate whether S3Express session auth should be - // disabled - // - // Parameter is required. - DisableS3ExpressSessionAuth *bool -} - -// ValidateRequired validates required parameters are set. -func (p EndpointParameters) ValidateRequired() error { - if p.Accelerate == nil { - return fmt.Errorf("parameter Accelerate is required") - } - - if p.DisableMultiRegionAccessPoints == nil { - return fmt.Errorf("parameter DisableMultiRegionAccessPoints is required") - } - - if p.ForcePathStyle == nil { - return fmt.Errorf("parameter ForcePathStyle is required") - } - - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - if p.UseGlobalEndpoint == nil { - return fmt.Errorf("parameter UseGlobalEndpoint is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.Accelerate == nil { - p.Accelerate = ptr.Bool(false) - } - - if p.DisableMultiRegionAccessPoints == nil { - p.DisableMultiRegionAccessPoints = ptr.Bool(false) - } - - if p.ForcePathStyle == nil { - p.ForcePathStyle = ptr.Bool(false) - } - - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - - if p.UseGlobalEndpoint == nil { - p.UseGlobalEndpoint = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseFIPS := *params.UseFIPS - _UseDualStack := *params.UseDualStack - _ForcePathStyle := *params.ForcePathStyle - _Accelerate := *params.Accelerate - _UseGlobalEndpoint := *params.UseGlobalEndpoint - _DisableMultiRegionAccessPoints := *params.DisableMultiRegionAccessPoints - - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if _Accelerate == true { - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Accelerate cannot be used with FIPS") - } - } - if _UseDualStack == true { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - return endpoint, fmt.Errorf("endpoint rule error, %s", "Cannot set dual-stack in combination with a custom endpoint.") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with FIPS") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with S3 Accelerate") - } - } - if _UseFIPS == true { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _partitionResult.Name == "aws-cn" { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Partition does not support FIPS") - } - } - } - if exprVal := params.Bucket; exprVal != nil { - _Bucket := *exprVal - _ = _Bucket - if exprVal := rulesfn.SubString(_Bucket, 0, 6, true); exprVal != nil { - _bucketSuffix := *exprVal - _ = _bucketSuffix - if _bucketSuffix == "--x-s3" { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if _url.IsIp == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - if _url.IsIp == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { - _UseS3ExpressControlEndpoint := *exprVal - _ = _UseS3ExpressControlEndpoint - if _UseS3ExpressControlEndpoint == true { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - if !(params.Endpoint != nil) { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - } - if exprVal := params.Bucket; exprVal != nil { - _Bucket := *exprVal - _ = _Bucket - if exprVal := rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil { - _accessPointSuffix := *exprVal - _ = _accessPointSuffix - if _accessPointSuffix == "--xa-s3" { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if _url.IsIp == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - if _url.IsIp == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if exprVal := rulesfn.SubString(_Bucket, 7, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 16, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 16, 18, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 20, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 21, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 21, 23, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 27, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 27, 29, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 16, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 16, 18, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 20, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 21, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 21, 23, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 7, 27, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 27, 29, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - } - if !(params.Bucket != nil) { - if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { - _UseS3ExpressControlEndpoint := *exprVal - _ = _UseS3ExpressControlEndpoint - if _UseS3ExpressControlEndpoint == true { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - } - if exprVal := params.Bucket; exprVal != nil { - _Bucket := *exprVal - _ = _Bucket - if exprVal := rulesfn.SubString(_Bucket, 49, 50, true); exprVal != nil { - _hardwareType := *exprVal - _ = _hardwareType - if exprVal := rulesfn.SubString(_Bucket, 8, 12, true); exprVal != nil { - _regionPrefix := *exprVal - _ = _regionPrefix - if exprVal := rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil { - _bucketAliasSuffix := *exprVal - _ = _bucketAliasSuffix - if exprVal := rulesfn.SubString(_Bucket, 32, 49, true); exprVal != nil { - _outpostId := *exprVal - _ = _outpostId - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _regionPartition := *exprVal - _ = _regionPartition - if _bucketAliasSuffix == "--op-s3" { - if rulesfn.IsValidHostLabel(_outpostId, false) { - if _hardwareType == "e" { - if _regionPrefix == "beta" { - if !(params.Endpoint != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".ec2.") - out.WriteString(_url.Authority) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".ec2.s3-outposts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_regionPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _hardwareType == "o" { - if _regionPrefix == "beta" { - if !(params.Endpoint != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".op-") - out.WriteString(_outpostId) - out.WriteString(".") - out.WriteString(_url.Authority) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".op-") - out.WriteString(_outpostId) - out.WriteString(".s3-outposts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_regionPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Unrecognized hardware type: \"Expected hardware type o or e but got ") - out.WriteString(_hardwareType) - out.WriteString("\"") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`.") - } - } - } - } - } - } - } - if exprVal := params.Bucket; exprVal != nil { - _Bucket := *exprVal - _ = _Bucket - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if !(rulesfn.ParseURL(_Endpoint) != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Custom endpoint `") - out.WriteString(_Endpoint) - out.WriteString("` was not a valid URI") - return out.String() - }()) - } - } - if _ForcePathStyle == false { - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, false) { - if _Accelerate == true { - if _partitionResult.Name == "aws-cn" { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Accelerate cannot be used in this region") - } - } - if _UseDualStack == true { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.dualstack.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.dualstack.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == true { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.Scheme == "http" { - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, true) { - if _ForcePathStyle == false { - if _UseFIPS == false { - if _UseDualStack == false { - if _Accelerate == false { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - } - } - } - } - } - } - if _ForcePathStyle == false { - if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _bucketArn := *exprVal - _ = _bucketArn - if exprVal := _bucketArn.ResourceId.Get(0); exprVal != nil { - _arnType := *exprVal - _ = _arnType - if !(_arnType == "") { - if _bucketArn.Service == "s3-object-lambda" { - if _arnType == "accesspoint" { - if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { - _accessPointName := *exprVal - _ = _accessPointName - if !(_accessPointName == "") { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate") - } - if !(_bucketArn.Region == "") { - if exprVal := params.DisableAccessPoints; exprVal != nil { - _DisableAccessPoints := *exprVal - _ = _DisableAccessPoints - if _DisableAccessPoints == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation") - } - } - if !(_bucketArn.ResourceId.Get(2) != nil) { - if exprVal := params.UseArnRegion; exprVal != nil { - _UseArnRegion := *exprVal - _ = _UseArnRegion - if _UseArnRegion == false { - if !(_bucketArn.Region == _Region) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid configuration: region from ARN `") - out.WriteString(_bucketArn.Region) - out.WriteString("` does not match client region `") - out.WriteString(_Region) - out.WriteString("` and UseArnRegion is `false`") - return out.String() - }()) - } - } - } - if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { - _bucketPartition := *exprVal - _ = _bucketPartition - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _bucketPartition.Name == _partitionResult.Name { - if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { - if _bucketArn.AccountId == "" { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Missing account id") - } - if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { - if rulesfn.IsValidHostLabel(_accessPointName, false) { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-object-lambda-fips.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-object-lambda.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_accessPointName) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_bucketArn.AccountId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid region in ARN: `") - out.WriteString(_bucketArn.Region) - out.WriteString("` (invalid DNS name)") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_partitionResult.Name) - out.WriteString("` but ARN (`") - out.WriteString(_Bucket) - out.WriteString("`) has `") - out.WriteString(_bucketPartition.Name) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: bucket ARN is missing a region") - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `") - out.WriteString(_arnType) - out.WriteString("`") - return out.String() - }()) - } - if _arnType == "accesspoint" { - if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { - _accessPointName := *exprVal - _ = _accessPointName - if !(_accessPointName == "") { - if !(_bucketArn.Region == "") { - if _arnType == "accesspoint" { - if !(_bucketArn.Region == "") { - if exprVal := params.DisableAccessPoints; exprVal != nil { - _DisableAccessPoints := *exprVal - _ = _DisableAccessPoints - if _DisableAccessPoints == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation") - } - } - if !(_bucketArn.ResourceId.Get(2) != nil) { - if exprVal := params.UseArnRegion; exprVal != nil { - _UseArnRegion := *exprVal - _ = _UseArnRegion - if _UseArnRegion == false { - if !(_bucketArn.Region == _Region) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid configuration: region from ARN `") - out.WriteString(_bucketArn.Region) - out.WriteString("` does not match client region `") - out.WriteString(_Region) - out.WriteString("` and UseArnRegion is `false`") - return out.String() - }()) - } - } - } - if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { - _bucketPartition := *exprVal - _ = _bucketPartition - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _bucketPartition.Name == _partitionResult.Name { - if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { - if _bucketArn.Service == "s3" { - if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { - if rulesfn.IsValidHostLabel(_accessPointName, false) { - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Access Points do not support S3 Accelerate") - } - if _UseFIPS == true { - if _UseDualStack == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint-fips.dualstack.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - if _UseDualStack == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint-fips.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == false { - if _UseDualStack == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint.dualstack.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_accessPointName) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_bucketArn.AccountId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The ARN was not for the S3 service, found: ") - out.WriteString(_bucketArn.Service) - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid region in ARN: `") - out.WriteString(_bucketArn.Region) - out.WriteString("` (invalid DNS name)") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_partitionResult.Name) - out.WriteString("` but ARN (`") - out.WriteString(_Bucket) - out.WriteString("`) has `") - out.WriteString(_bucketPartition.Name) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - if rulesfn.IsValidHostLabel(_accessPointName, true) { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support dual-stack") - } - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support FIPS") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support S3 Accelerate") - } - if _DisableMultiRegionAccessPoints == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid configuration: Multi-Region Access Point ARNs are disabled.") - } - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _mrapPartition := *exprVal - _ = _mrapPartition - if _mrapPartition.Name == _bucketArn.Partition { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString(".accesspoint.s3-global.") - out.WriteString(_mrapPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_mrapPartition.Name) - out.WriteString("` but bucket referred to partition `") - out.WriteString(_bucketArn.Partition) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Access Point Name") - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided") - } - if _bucketArn.Service == "s3-outposts" { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support Dual-stack") - } - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support FIPS") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate") - } - if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil { - _var_321 := *exprVal - _ = _var_321 - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources") - } - if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { - _outpostId := *exprVal - _ = _outpostId - if rulesfn.IsValidHostLabel(_outpostId, false) { - if exprVal := params.UseArnRegion; exprVal != nil { - _UseArnRegion := *exprVal - _ = _UseArnRegion - if _UseArnRegion == false { - if !(_bucketArn.Region == _Region) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid configuration: region from ARN `") - out.WriteString(_bucketArn.Region) - out.WriteString("` does not match client region `") - out.WriteString(_Region) - out.WriteString("` and UseArnRegion is `false`") - return out.String() - }()) - } - } - } - if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { - _bucketPartition := *exprVal - _ = _bucketPartition - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _bucketPartition.Name == _partitionResult.Name { - if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { - if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { - if exprVal := _bucketArn.ResourceId.Get(2); exprVal != nil { - _outpostType := *exprVal - _ = _outpostType - if exprVal := _bucketArn.ResourceId.Get(3); exprVal != nil { - _accessPointName := *exprVal - _ = _accessPointName - if _outpostType == "accesspoint" { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_outpostId) - out.WriteString(".") - out.WriteString(_url.Authority) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_outpostId) - out.WriteString(".s3-outposts.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Expected an outpost type `accesspoint`, found ") - out.WriteString(_outpostType) - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: expected an access point name") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a 4-component resource") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_bucketArn.AccountId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid region in ARN: `") - out.WriteString(_bucketArn.Region) - out.WriteString("` (invalid DNS name)") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_partitionResult.Name) - out.WriteString("` but ARN (`") - out.WriteString(_Bucket) - out.WriteString("`) has `") - out.WriteString(_bucketPartition.Name) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_outpostId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The Outpost Id was not set") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: Unrecognized format: ") - out.WriteString(_Bucket) - out.WriteString(" (type: ") - out.WriteString(_arnType) - out.WriteString(")") - return out.String() - }()) - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: No ARN type specified") - } - } - if exprVal := rulesfn.SubString(_Bucket, 0, 4, false); exprVal != nil { - _arnPrefix := *exprVal - _ = _arnPrefix - if _arnPrefix == "arn:" { - if !(awsrulesfn.ParseARN(_Bucket) != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: `") - out.WriteString(_Bucket) - out.WriteString("` was not a valid ARN") - return out.String() - }()) - } - } - } - if _ForcePathStyle == true { - if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _var_334 := *exprVal - _ = _var_334 - return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets") - } - } - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _Accelerate == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _UseFIPS == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with S3 Accelerate") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - if exprVal := params.UseObjectLambdaEndpoint; exprVal != nil { - _UseObjectLambdaEndpoint := *exprVal - _ = _UseObjectLambdaEndpoint - if _UseObjectLambdaEndpoint == true { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, true) { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-object-lambda-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-object-lambda.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - if !(params.Bucket != nil) { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, true) { - if _UseFIPS == true { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "A region must be set when sending requests to S3.") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - params.ForcePathStyle = aws.Bool(options.UsePathStyle) - params.Accelerate = aws.Bool(options.UseAccelerate) - params.DisableMultiRegionAccessPoints = aws.Bool(options.DisableMultiRegionAccessPoints) - params.UseArnRegion = aws.Bool(options.UseARNRegion) - - params.DisableS3ExpressSessionAuth = options.DisableS3ExpressSessionAuth - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - ctx = setS3ResolvedURI(ctx, endpt.URI.String()) - - backend := s3cust.GetPropertiesBackend(&endpt.Properties) - ctx = internalcontext.SetS3Backend(ctx, backend) - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go deleted file mode 100644 index d6cdb533727c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go +++ /dev/null @@ -1,285 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - smithysync "github.com/aws/smithy-go/sync" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "io/ioutil" - "sync" -) - -// SelectObjectContentEventStreamReader provides the interface for reading events -// from a stream. -// -// The writer's Close method must allow multiple concurrent calls. -type SelectObjectContentEventStreamReader interface { - Events() <-chan types.SelectObjectContentEventStream - Close() error - Err() error -} - -type selectObjectContentEventStreamReader struct { - stream chan types.SelectObjectContentEventStream - decoder *eventstream.Decoder - eventStream io.ReadCloser - err *smithysync.OnceErr - payloadBuf []byte - done chan struct{} - closeOnce sync.Once -} - -func newSelectObjectContentEventStreamReader(readCloser io.ReadCloser, decoder *eventstream.Decoder) *selectObjectContentEventStreamReader { - w := &selectObjectContentEventStreamReader{ - stream: make(chan types.SelectObjectContentEventStream), - decoder: decoder, - eventStream: readCloser, - err: smithysync.NewOnceErr(), - done: make(chan struct{}), - payloadBuf: make([]byte, 10*1024), - } - - go w.readEventStream() - - return w -} - -func (r *selectObjectContentEventStreamReader) Events() <-chan types.SelectObjectContentEventStream { - return r.stream -} - -func (r *selectObjectContentEventStreamReader) readEventStream() { - defer r.Close() - defer close(r.stream) - - for { - r.payloadBuf = r.payloadBuf[0:0] - decodedMessage, err := r.decoder.Decode(r.eventStream, r.payloadBuf) - if err != nil { - if err == io.EOF { - return - } - select { - case <-r.done: - return - default: - r.err.SetError(err) - return - } - } - - event, err := r.deserializeEventMessage(&decodedMessage) - if err != nil { - r.err.SetError(err) - return - } - - select { - case r.stream <- event: - case <-r.done: - return - } - - } -} - -func (r *selectObjectContentEventStreamReader) deserializeEventMessage(msg *eventstream.Message) (types.SelectObjectContentEventStream, error) { - messageType := msg.Headers.Get(eventstreamapi.MessageTypeHeader) - if messageType == nil { - return nil, fmt.Errorf("%s event header not present", eventstreamapi.MessageTypeHeader) - } - - switch messageType.String() { - case eventstreamapi.EventMessageType: - var v types.SelectObjectContentEventStream - if err := awsRestxml_deserializeEventStreamSelectObjectContentEventStream(&v, msg); err != nil { - return nil, err - } - return v, nil - - case eventstreamapi.ExceptionMessageType: - return nil, awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg) - - case eventstreamapi.ErrorMessageType: - errorCode := "UnknownError" - errorMessage := errorCode - if header := msg.Headers.Get(eventstreamapi.ErrorCodeHeader); header != nil { - errorCode = header.String() - } - if header := msg.Headers.Get(eventstreamapi.ErrorMessageHeader); header != nil { - errorMessage = header.String() - } - return nil, &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - - default: - mc := msg.Clone() - return nil, &UnknownEventMessageError{ - Type: messageType.String(), - Message: &mc, - } - - } -} - -func (r *selectObjectContentEventStreamReader) ErrorSet() <-chan struct{} { - return r.err.ErrorSet() -} - -func (r *selectObjectContentEventStreamReader) Close() error { - r.closeOnce.Do(r.safeClose) - return r.Err() -} - -func (r *selectObjectContentEventStreamReader) safeClose() { - close(r.done) - r.eventStream.Close() - -} - -func (r *selectObjectContentEventStreamReader) Err() error { - return r.err.Err() -} - -func (r *selectObjectContentEventStreamReader) Closed() <-chan struct{} { - return r.done -} - -type awsRestxml_deserializeOpEventStreamSelectObjectContent struct { - LogEventStreamWrites bool - LogEventStreamReads bool -} - -func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) ID() string { - return "OperationEventStreamDeserializer" -} - -func (m *awsRestxml_deserializeOpEventStreamSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - defer func() { - if err == nil { - return - } - m.closeResponseBody(out) - }() - - logger := middleware.GetLogger(ctx) - - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) - } - _ = request - - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - deserializeOutput, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) - } - _ = deserializeOutput - - output, ok := out.Result.(*SelectObjectContentOutput) - if out.Result != nil && !ok { - return out, metadata, fmt.Errorf("unexpected output result type: %T", out.Result) - } else if out.Result == nil { - output = &SelectObjectContentOutput{} - out.Result = output - } - - eventReader := newSelectObjectContentEventStreamReader( - deserializeOutput.Body, - eventstream.NewDecoder(func(options *eventstream.DecoderOptions) { - options.Logger = logger - options.LogMessages = m.LogEventStreamReads - - }), - ) - defer func() { - if err == nil { - return - } - _ = eventReader.Close() - }() - - output.eventStream = NewSelectObjectContentEventStream(func(stream *SelectObjectContentEventStream) { - stream.Reader = eventReader - }) - - go output.eventStream.waitStreamClose() - - return out, metadata, nil -} - -func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) closeResponseBody(out middleware.DeserializeOutput) { - if resp, ok := out.RawResponse.(*smithyhttp.Response); ok && resp != nil && resp.Body != nil { - _, _ = io.Copy(ioutil.Discard, resp.Body) - _ = resp.Body.Close() - } -} - -func addEventStreamSelectObjectContentMiddleware(stack *middleware.Stack, options Options) error { - if err := stack.Deserialize.Insert(&awsRestxml_deserializeOpEventStreamSelectObjectContent{ - LogEventStreamWrites: options.ClientLogMode.IsRequestEventMessage(), - LogEventStreamReads: options.ClientLogMode.IsResponseEventMessage(), - }, "OperationDeserializer", middleware.Before); err != nil { - return err - } - return nil - -} - -// UnknownEventMessageError provides an error when a message is received from the stream, -// but the reader is unable to determine what kind of message it is. -type UnknownEventMessageError struct { - Type string - Message *eventstream.Message -} - -// Error retruns the error message string. -func (e *UnknownEventMessageError) Error() string { - return "unknown event stream message type, " + e.Type -} - -func setSafeEventStreamClientLogMode(o *Options, operation string) { - switch operation { - case "SelectObjectContent": - toggleEventStreamClientLogMode(o, false, true) - return - - default: - return - - } -} -func toggleEventStreamClientLogMode(o *Options, request, response bool) { - mode := o.ClientLogMode - - if request && mode.IsRequestWithBody() { - mode.ClearRequestWithBody() - mode |= aws.LogRequest - } - - if response && mode.IsResponseWithBody() { - mode.ClearResponseWithBody() - mode |= aws.LogResponse - } - - o.ClientLogMode = mode - -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go deleted file mode 100644 index bbac9ca270e1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go +++ /dev/null @@ -1,9 +0,0 @@ -package s3 - -import ( - "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" -) - -// ExpressCredentialsProvider retrieves credentials for operations against the -// S3Express storage class. -type ExpressCredentialsProvider = customizations.S3ExpressCredentialsProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go deleted file mode 100644 index 3b35a3e57484..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go +++ /dev/null @@ -1,170 +0,0 @@ -package s3 - -import ( - "context" - "crypto/hmac" - "crypto/sha256" - "errors" - "fmt" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" - "github.com/aws/smithy-go/container/private/cache" - "github.com/aws/smithy-go/container/private/cache/lru" -) - -const s3ExpressCacheCap = 100 - -const s3ExpressRefreshWindow = 1 * time.Minute - -type cacheKey struct { - CredentialsHash string // hmac(sigv4 akid, sigv4 secret) - Bucket string -} - -func (c cacheKey) Slug() string { - return fmt.Sprintf("%s%s", c.CredentialsHash, c.Bucket) -} - -type sessionCredsCache struct { - mu sync.Mutex - cache cache.Cache -} - -func (c *sessionCredsCache) Get(key cacheKey) (*aws.Credentials, bool) { - c.mu.Lock() - defer c.mu.Unlock() - - if v, ok := c.cache.Get(key); ok { - return v.(*aws.Credentials), true - } - return nil, false -} - -func (c *sessionCredsCache) Put(key cacheKey, creds *aws.Credentials) { - c.mu.Lock() - defer c.mu.Unlock() - - c.cache.Put(key, creds) -} - -// The default S3Express provider uses an LRU cache with a capacity of 100. -// -// Credentials will be refreshed asynchronously when a Retrieve() call is made -// for cached credentials within an expiry window (1 minute, currently -// non-configurable). -type defaultS3ExpressCredentialsProvider struct { - sf singleflight.Group - - client createSessionAPIClient - cache *sessionCredsCache - refreshWindow time.Duration - v4creds aws.CredentialsProvider // underlying credentials used for CreateSession -} - -type createSessionAPIClient interface { - CreateSession(context.Context, *CreateSessionInput, ...func(*Options)) (*CreateSessionOutput, error) -} - -func newDefaultS3ExpressCredentialsProvider() *defaultS3ExpressCredentialsProvider { - return &defaultS3ExpressCredentialsProvider{ - cache: &sessionCredsCache{ - cache: lru.New(s3ExpressCacheCap), - }, - refreshWindow: s3ExpressRefreshWindow, - } -} - -// returns a cloned provider using new base credentials, used when per-op -// config mutations change the credentials provider -func (p *defaultS3ExpressCredentialsProvider) CloneWithBaseCredentials(v4creds aws.CredentialsProvider) *defaultS3ExpressCredentialsProvider { - return &defaultS3ExpressCredentialsProvider{ - client: p.client, - cache: p.cache, - refreshWindow: p.refreshWindow, - v4creds: v4creds, - } -} - -func (p *defaultS3ExpressCredentialsProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) { - v4creds, err := p.v4creds.Retrieve(ctx) - if err != nil { - return aws.Credentials{}, fmt.Errorf("get sigv4 creds: %w", err) - } - - key := cacheKey{ - CredentialsHash: gethmac(v4creds.AccessKeyID, v4creds.SecretAccessKey), - Bucket: bucket, - } - creds, ok := p.cache.Get(key) - if !ok || creds.Expired() { - return p.awaitDoChanRetrieve(ctx, key) - } - - if creds.Expires.Sub(sdk.NowTime()) <= p.refreshWindow { - p.doChanRetrieve(ctx, key) - } - - return *creds, nil -} - -func (p *defaultS3ExpressCredentialsProvider) doChanRetrieve(ctx context.Context, key cacheKey) <-chan singleflight.Result { - return p.sf.DoChan(key.Slug(), func() (interface{}, error) { - return p.retrieve(ctx, key) - }) -} - -func (p *defaultS3ExpressCredentialsProvider) awaitDoChanRetrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { - ch := p.doChanRetrieve(ctx, key) - - select { - case r := <-ch: - return r.Val.(aws.Credentials), r.Err - case <-ctx.Done(): - return aws.Credentials{}, errors.New("s3express retrieve credentials canceled") - } -} - -func (p *defaultS3ExpressCredentialsProvider) retrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { - resp, err := p.client.CreateSession(ctx, &CreateSessionInput{ - Bucket: aws.String(key.Bucket), - }) - if err != nil { - return aws.Credentials{}, err - } - - creds, err := credentialsFromResponse(resp) - if err != nil { - return aws.Credentials{}, err - } - - p.cache.Put(key, creds) - return *creds, nil -} - -func credentialsFromResponse(o *CreateSessionOutput) (*aws.Credentials, error) { - if o.Credentials == nil { - return nil, errors.New("s3express session credentials unset") - } - - if o.Credentials.AccessKeyId == nil || o.Credentials.SecretAccessKey == nil || o.Credentials.SessionToken == nil || o.Credentials.Expiration == nil { - return nil, errors.New("s3express session credentials missing one or more required fields") - } - - return &aws.Credentials{ - AccessKeyID: *o.Credentials.AccessKeyId, - SecretAccessKey: *o.Credentials.SecretAccessKey, - SessionToken: *o.Credentials.SessionToken, - CanExpire: true, - Expires: *o.Credentials.Expiration, - }, nil -} - -func gethmac(p, key string) string { - hash := hmac.New(sha256.New, []byte(key)) - hash.Write([]byte(p)) - return string(hash.Sum(nil)) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go deleted file mode 100644 index 7c7a7b424008..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go +++ /dev/null @@ -1,39 +0,0 @@ -package s3 - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" -) - -// If the caller hasn't provided an S3Express provider, we use our default -// which will grab a reference to the S3 client itself in finalization. -func resolveExpressCredentials(o *Options) { - if o.ExpressCredentials == nil { - o.ExpressCredentials = newDefaultS3ExpressCredentialsProvider() - } -} - -// Config finalizer: if we're using the default S3Express implementation, grab -// a reference to the client for its CreateSession API, and the underlying -// sigv4 credentials provider for cache keying. -func finalizeExpressCredentials(o *Options, c *Client) { - if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { - p.client = c - p.v4creds = o.Credentials - } -} - -// Operation config finalizer: update the sigv4 credentials on the default -// express provider in case it changed to ensure different cache keys -func finalizeOperationExpressCredentials(o *Options, c Client) { - if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { - o.ExpressCredentials = p.CloneWithBaseCredentials(o.Credentials) - } -} - -// NewFromConfig resolver: pull from opaque sources if it exists. -func resolveDisableExpressAuth(cfg aws.Config, o *Options) { - if v, ok := customizations.ResolveDisableExpressAuth(cfg.ConfigSources); ok { - o.DisableS3ExpressSessionAuth = &v - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go deleted file mode 100644 index a9b54535bdef..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go +++ /dev/null @@ -1,43 +0,0 @@ -package s3 - -import ( - "context" - "strings" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" -) - -// isExpressUserAgent tracks whether the caller is using S3 Express -// -// we can only derive this at runtime, so the middleware needs to hold a handle -// to the underlying user-agent manipulator to set the feature flag as -// necessary -type isExpressUserAgent struct { - ua *awsmiddleware.RequestUserAgent -} - -func (*isExpressUserAgent) ID() string { - return "isExpressUserAgent" -} - -func (m *isExpressUserAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - const expressSuffix = "--x-s3" - - bucket, ok := bucketFromInput(in.Parameters) - if ok && strings.HasSuffix(bucket, expressSuffix) { - m.ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureS3ExpressBucket) - } - return next.HandleSerialize(ctx, in) -} - -func addIsExpressUserAgent(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - return stack.Serialize.Add(&isExpressUserAgent{ua}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json deleted file mode 100644 index 6268fcb7ecde..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ /dev/null @@ -1,145 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/v4a": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", - "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_AbortMultipartUpload.go", - "api_op_CompleteMultipartUpload.go", - "api_op_CopyObject.go", - "api_op_CreateBucket.go", - "api_op_CreateBucketMetadataConfiguration.go", - "api_op_CreateBucketMetadataTableConfiguration.go", - "api_op_CreateMultipartUpload.go", - "api_op_CreateSession.go", - "api_op_DeleteBucket.go", - "api_op_DeleteBucketAnalyticsConfiguration.go", - "api_op_DeleteBucketCors.go", - "api_op_DeleteBucketEncryption.go", - "api_op_DeleteBucketIntelligentTieringConfiguration.go", - "api_op_DeleteBucketInventoryConfiguration.go", - "api_op_DeleteBucketLifecycle.go", - "api_op_DeleteBucketMetadataConfiguration.go", - "api_op_DeleteBucketMetadataTableConfiguration.go", - "api_op_DeleteBucketMetricsConfiguration.go", - "api_op_DeleteBucketOwnershipControls.go", - "api_op_DeleteBucketPolicy.go", - "api_op_DeleteBucketReplication.go", - "api_op_DeleteBucketTagging.go", - "api_op_DeleteBucketWebsite.go", - "api_op_DeleteObject.go", - "api_op_DeleteObjectTagging.go", - "api_op_DeleteObjects.go", - "api_op_DeletePublicAccessBlock.go", - "api_op_GetBucketAccelerateConfiguration.go", - "api_op_GetBucketAcl.go", - "api_op_GetBucketAnalyticsConfiguration.go", - "api_op_GetBucketCors.go", - "api_op_GetBucketEncryption.go", - "api_op_GetBucketIntelligentTieringConfiguration.go", - "api_op_GetBucketInventoryConfiguration.go", - "api_op_GetBucketLifecycleConfiguration.go", - "api_op_GetBucketLocation.go", - "api_op_GetBucketLogging.go", - "api_op_GetBucketMetadataConfiguration.go", - "api_op_GetBucketMetadataTableConfiguration.go", - "api_op_GetBucketMetricsConfiguration.go", - "api_op_GetBucketNotificationConfiguration.go", - "api_op_GetBucketOwnershipControls.go", - "api_op_GetBucketPolicy.go", - "api_op_GetBucketPolicyStatus.go", - "api_op_GetBucketReplication.go", - "api_op_GetBucketRequestPayment.go", - "api_op_GetBucketTagging.go", - "api_op_GetBucketVersioning.go", - "api_op_GetBucketWebsite.go", - "api_op_GetObject.go", - "api_op_GetObjectAcl.go", - "api_op_GetObjectAttributes.go", - "api_op_GetObjectLegalHold.go", - "api_op_GetObjectLockConfiguration.go", - "api_op_GetObjectRetention.go", - "api_op_GetObjectTagging.go", - "api_op_GetObjectTorrent.go", - "api_op_GetPublicAccessBlock.go", - "api_op_HeadBucket.go", - "api_op_HeadObject.go", - "api_op_ListBucketAnalyticsConfigurations.go", - "api_op_ListBucketIntelligentTieringConfigurations.go", - "api_op_ListBucketInventoryConfigurations.go", - "api_op_ListBucketMetricsConfigurations.go", - "api_op_ListBuckets.go", - "api_op_ListDirectoryBuckets.go", - "api_op_ListMultipartUploads.go", - "api_op_ListObjectVersions.go", - "api_op_ListObjects.go", - "api_op_ListObjectsV2.go", - "api_op_ListParts.go", - "api_op_PutBucketAccelerateConfiguration.go", - "api_op_PutBucketAcl.go", - "api_op_PutBucketAnalyticsConfiguration.go", - "api_op_PutBucketCors.go", - "api_op_PutBucketEncryption.go", - "api_op_PutBucketIntelligentTieringConfiguration.go", - "api_op_PutBucketInventoryConfiguration.go", - "api_op_PutBucketLifecycleConfiguration.go", - "api_op_PutBucketLogging.go", - "api_op_PutBucketMetricsConfiguration.go", - "api_op_PutBucketNotificationConfiguration.go", - "api_op_PutBucketOwnershipControls.go", - "api_op_PutBucketPolicy.go", - "api_op_PutBucketReplication.go", - "api_op_PutBucketRequestPayment.go", - "api_op_PutBucketTagging.go", - "api_op_PutBucketVersioning.go", - "api_op_PutBucketWebsite.go", - "api_op_PutObject.go", - "api_op_PutObjectAcl.go", - "api_op_PutObjectLegalHold.go", - "api_op_PutObjectLockConfiguration.go", - "api_op_PutObjectRetention.go", - "api_op_PutObjectTagging.go", - "api_op_PutPublicAccessBlock.go", - "api_op_RenameObject.go", - "api_op_RestoreObject.go", - "api_op_SelectObjectContent.go", - "api_op_UpdateBucketMetadataInventoryTableConfiguration.go", - "api_op_UpdateBucketMetadataJournalTableConfiguration.go", - "api_op_UploadPart.go", - "api_op_UploadPartCopy.go", - "api_op_WriteGetObjectResponse.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "eventstream.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "sra_operation_order_test.go", - "types/enums.go", - "types/errors.go", - "types/types.go", - "types/types_exported_test.go", - "validators.go" - ], - "go": "1.22", - "module": "github.com/aws/aws-sdk-go-v2/service/s3", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go deleted file mode 100644 index 4af533e66541..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package s3 - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.87.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go deleted file mode 100644 index 6aae79e7cac2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go +++ /dev/null @@ -1,214 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// ListObjectVersionsAPIClient is a client that implements the ListObjectVersions -// operation -type ListObjectVersionsAPIClient interface { - ListObjectVersions(context.Context, *ListObjectVersionsInput, ...func(*Options)) (*ListObjectVersionsOutput, error) -} - -var _ ListObjectVersionsAPIClient = (*Client)(nil) - -// ListObjectVersionsPaginatorOptions is the paginator options for ListObjectVersions -type ListObjectVersionsPaginatorOptions struct { - // (Optional) The maximum number of Object Versions that you want Amazon S3 to - // return. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListObjectVersionsPaginator is a paginator for ListObjectVersions -type ListObjectVersionsPaginator struct { - options ListObjectVersionsPaginatorOptions - client ListObjectVersionsAPIClient - params *ListObjectVersionsInput - firstPage bool - keyMarker *string - versionIDMarker *string - isTruncated bool -} - -// NewListObjectVersionsPaginator returns a new ListObjectVersionsPaginator -func NewListObjectVersionsPaginator(client ListObjectVersionsAPIClient, params *ListObjectVersionsInput, optFns ...func(*ListObjectVersionsPaginatorOptions)) *ListObjectVersionsPaginator { - if params == nil { - params = &ListObjectVersionsInput{} - } - - options := ListObjectVersionsPaginatorOptions{} - if params.MaxKeys != nil { - options.Limit = aws.ToInt32(params.MaxKeys) - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListObjectVersionsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - keyMarker: params.KeyMarker, - versionIDMarker: params.VersionIdMarker, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListObjectVersionsPaginator) HasMorePages() bool { - return p.firstPage || p.isTruncated -} - -// NextPage retrieves the next ListObjectVersions page. -func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.KeyMarker = p.keyMarker - params.VersionIdMarker = p.versionIDMarker - - var limit int32 - if p.options.Limit > 0 { - limit = p.options.Limit - } - if limit > 0 { - params.MaxKeys = aws.Int32(limit) - } - - result, err := p.client.ListObjectVersions(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.keyMarker - p.isTruncated = aws.ToBool(result.IsTruncated) - p.keyMarker = nil - p.versionIDMarker = nil - if aws.ToBool(result.IsTruncated) { - p.keyMarker = result.NextKeyMarker - p.versionIDMarker = result.NextVersionIdMarker - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.keyMarker != nil && - *prevToken == *p.keyMarker { - p.isTruncated = false - } - - return result, nil -} - -// ListMultipartUploadsAPIClient is a client that implements the ListMultipartUploads -// operation -type ListMultipartUploadsAPIClient interface { - ListMultipartUploads(context.Context, *ListMultipartUploadsInput, ...func(*Options)) (*ListMultipartUploadsOutput, error) -} - -var _ ListMultipartUploadsAPIClient = (*Client)(nil) - -// ListMultipartUploadsPaginatorOptions is the paginator options for ListMultipartUploads -type ListMultipartUploadsPaginatorOptions struct { - // (Optional) The maximum number of Multipart Uploads that you want Amazon S3 to - // return. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads -type ListMultipartUploadsPaginator struct { - options ListMultipartUploadsPaginatorOptions - client ListMultipartUploadsAPIClient - params *ListMultipartUploadsInput - firstPage bool - keyMarker *string - uploadIDMarker *string - isTruncated bool -} - -// NewListMultipartUploadsPaginator returns a new ListMultipartUploadsPaginator -func NewListMultipartUploadsPaginator(client ListMultipartUploadsAPIClient, params *ListMultipartUploadsInput, optFns ...func(*ListMultipartUploadsPaginatorOptions)) *ListMultipartUploadsPaginator { - if params == nil { - params = &ListMultipartUploadsInput{} - } - - options := ListMultipartUploadsPaginatorOptions{} - if params.MaxUploads != nil { - options.Limit = aws.ToInt32(params.MaxUploads) - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListMultipartUploadsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - keyMarker: params.KeyMarker, - uploadIDMarker: params.UploadIdMarker, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListMultipartUploadsPaginator) HasMorePages() bool { - return p.firstPage || p.isTruncated -} - -// NextPage retrieves the next ListMultipartUploads page. -func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.KeyMarker = p.keyMarker - params.UploadIdMarker = p.uploadIDMarker - - var limit int32 - if p.options.Limit > 0 { - limit = p.options.Limit - } - if limit > 0 { - params.MaxUploads = aws.Int32(limit) - } - - result, err := p.client.ListMultipartUploads(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.keyMarker - p.isTruncated = aws.ToBool(result.IsTruncated) - p.keyMarker = nil - p.uploadIDMarker = nil - if aws.ToBool(result.IsTruncated) { - p.keyMarker = result.NextKeyMarker - p.uploadIDMarker = result.NextUploadIdMarker - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.keyMarker != nil && - *prevToken == *p.keyMarker { - p.isTruncated = false - } - - return result, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go deleted file mode 100644 index 97b5771bb1f7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go +++ /dev/null @@ -1,106 +0,0 @@ -package arn - -import ( - "fmt" - "strings" - - awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" -) - -const ( - s3Namespace = "s3" - s3ObjectsLambdaNamespace = "s3-object-lambda" - s3OutpostsNamespace = "s3-outposts" -) - -// ParseEndpointARN parses a given generic aws ARN into a s3 arn resource. -func ParseEndpointARN(v awsarn.ARN) (arn.Resource, error) { - return arn.ParseResource(v, accessPointResourceParser) -} - -func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { - resParts := arn.SplitResource(a.Resource) - - switch resParts[0] { - case "accesspoint": - switch a.Service { - case s3Namespace: - return arn.ParseAccessPointResource(a, resParts[1:]) - case s3ObjectsLambdaNamespace: - return parseS3ObjectLambdaAccessPointResource(a, resParts) - default: - return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} - } - case "outpost": - if a.Service != s3OutpostsNamespace { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not %s"} - } - return parseOutpostAccessPointResource(a, resParts[1:]) - default: - return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} - } -} - -func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { - // outpost accesspoint arn is only valid if service is s3-outposts - if a.Service != "s3-outposts" { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} - } - - if len(resParts) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - if len(resParts) < 3 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ - ARN: a, Reason: "access-point resource not set in Outpost ARN", - } - } - - resID := strings.TrimSpace(resParts[0]) - if len(resID) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - var outpostAccessPointARN = arn.OutpostAccessPointARN{} - switch resParts[1] { - case "accesspoint": - // Do not allow region-less outpost access-point arns. - if len(a.Region) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "region is not set"} - } - - accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) - if err != nil { - return arn.OutpostAccessPointARN{}, err - } - // set access-point arn - outpostAccessPointARN.AccessPointARN = accessPointARN - default: - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} - } - - // set outpost id - outpostAccessPointARN.OutpostID = resID - return outpostAccessPointARN, nil -} - -func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { - if a.Service != s3ObjectsLambdaNamespace { - return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} - } - - if len(a.Region) == 0 { - return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} - } - - accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) - if err != nil { - return arn.S3ObjectLambdaAccessPointARN{}, err - } - - return arn.S3ObjectLambdaAccessPointARN{ - AccessPointARN: accessPointARN, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go deleted file mode 100644 index 91b8fde0d749..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go +++ /dev/null @@ -1,21 +0,0 @@ -package customizations - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -type bucketKey struct{} - -// SetBucket stores a bucket name within the request context, which is required -// for a variety of custom S3 behaviors. -func SetBucket(ctx context.Context, bucket string) context.Context { - return middleware.WithStackValue(ctx, bucketKey{}, bucket) -} - -// GetBucket retrieves a stored bucket name within a context. -func GetBucket(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, bucketKey{}).(string) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go deleted file mode 100644 index e1d1cbefa4b6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Package customizations provides customizations for the Amazon S3 API client. - -This package provides support for following S3 customizations - - ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the arn type - - UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options - - RemoveBucket Middleware: removes a serialized bucket name from request url path - - processResponseWith200Error Middleware: Deserializing response error with 200 status code - -# Virtual Host style url addressing - -Since serializers serialize by default as path style url, we use customization -to modify the endpoint url when `UsePathStyle` option on S3Client is unset or -false. This flag will be ignored if `UseAccelerate` option is set to true. - -If UseAccelerate is not enabled, and the bucket name is not a valid hostname -label, they SDK will fallback to forcing the request to be made as if -UsePathStyle was enabled. This behavior is also used if UseDualStackEndpoint is enabled. - -https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description - -# Transfer acceleration - -By default S3 Transfer acceleration support is disabled. By enabling `UseAccelerate` -option on S3Client, one can enable s3 transfer acceleration support. Transfer -acceleration only works with Virtual Host style addressing, and thus `UsePathStyle` -option if set is ignored. Transfer acceleration is not supported for S3 operations -DeleteBucket, ListBuckets, and CreateBucket. - -# Dualstack support - -By default dualstack support for s3 client is disabled. By enabling `UseDualstack` -option on s3 client, you can enable dualstack endpoint support. - -# Endpoint customizations - -Customizations to lookup ARN, process ARN needs to happen before request serialization. -UpdateEndpoint middleware which mutates resources based on Options such as -UseDualstack, UseAccelerate for modifying resolved endpoint are executed after -request serialization. Remove bucket middleware is executed after -an request is serialized, and removes the serialized bucket name from request path - - Middleware layering: - - Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step - - Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware - -Customization options: - - UseARNRegion (Disabled by Default) - - UsePathStyle (Disabled by Default) - - UseAccelerate (Disabled by Default) - - UseDualstack (Disabled by Default) - -# Handle Error response with 200 status code - -S3 operations: CopyObject, CompleteMultipartUpload, UploadPartCopy can have an -error Response with status code 2xx. The processResponseWith200Error middleware -customizations enables SDK to check for an error within response body prior to -deserialization. - -As the check for 2xx response containing an error needs to be performed earlier -than response deserialization. Since the behavior of Deserialization is in -reverse order to the other stack steps its easier to consider that "after" means -"before". - - Middleware layering: - - HTTP Response -> handle 200 error customization -> deserialize -*/ -package customizations diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go deleted file mode 100644 index 8cc0b36248c0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go +++ /dev/null @@ -1,44 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// S3ExpressCredentialsProvider retrieves credentials for the S3Express storage -// class. -type S3ExpressCredentialsProvider interface { - Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) -} - -// ExpressIdentityResolver retrieves identity for the S3Express storage class. -type ExpressIdentityResolver struct { - Provider S3ExpressCredentialsProvider -} - -var _ (auth.IdentityResolver) = (*ExpressIdentityResolver)(nil) - -// GetIdentity retrieves AWS credentials using the underlying provider. -func (v *ExpressIdentityResolver) GetIdentity(ctx context.Context, props smithy.Properties) ( - auth.Identity, error, -) { - bucket, ok := GetIdentityPropertiesBucket(&props) - if !ok { - bucket = GetBucket(ctx) - } - if bucket == "" { - return nil, fmt.Errorf("bucket name is missing") - } - - creds, err := v.Provider.Retrieve(ctx, bucket) - if err != nil { - return nil, fmt.Errorf("get credentials: %v", err) - } - - return &internalauthsmithy.CredentialsAdapter{Credentials: creds}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go deleted file mode 100644 index bb22d3474d65..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go +++ /dev/null @@ -1,18 +0,0 @@ -package customizations - -type s3DisableExpressAuthProvider interface { - GetS3DisableExpressAuth() (bool, bool) -} - -// ResolveDisableExpressAuth pulls S3DisableExpressAuth setting from config -// sources. -func ResolveDisableExpressAuth(configs []interface{}) (value bool, exists bool) { - for _, cfg := range configs { - if p, ok := cfg.(s3DisableExpressAuthProvider); ok { - if value, exists = p.GetS3DisableExpressAuth(); exists { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go deleted file mode 100644 index cf3ff5966c95..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go +++ /dev/null @@ -1,42 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - ictx "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - "github.com/aws/smithy-go/middleware" -) - -type expressDefaultChecksumMiddleware struct{} - -func (*expressDefaultChecksumMiddleware) ID() string { - return "expressDefaultChecksum" -} - -func (*expressDefaultChecksumMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if ictx.GetS3Backend(ctx) == ictx.S3BackendS3Express && ictx.GetChecksumInputAlgorithm(ctx) == "" { - ctx = ictx.SetChecksumInputAlgorithm(ctx, string(checksum.AlgorithmCRC32)) - } - return next.HandleFinalize(ctx, in) -} - -// AddExpressDefaultChecksumMiddleware appends a step to default to CRC32 for -// S3Express requests. This should only be applied to operations where a -// checksum is required (e.g. DeleteObject). -func AddExpressDefaultChecksumMiddleware(s *middleware.Stack) error { - err := s.Finalize.Insert( - &expressDefaultChecksumMiddleware{}, - "AWSChecksum:ComputeInputPayloadChecksum", - middleware.Before, - ) - if err != nil { - return fmt.Errorf("add expressDefaultChecksum: %v", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go deleted file mode 100644 index 171de4613054..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go +++ /dev/null @@ -1,21 +0,0 @@ -package customizations - -import "github.com/aws/smithy-go" - -// GetPropertiesBackend returns a resolved endpoint backend from the property -// set. -func GetPropertiesBackend(p *smithy.Properties) string { - v, _ := p.Get("backend").(string) - return v -} - -// GetIdentityPropertiesBucket returns the S3 bucket from identity properties. -func GetIdentityPropertiesBucket(ip *smithy.Properties) (string, bool) { - v, ok := ip.Get(bucketKey{}).(string) - return v, ok -} - -// SetIdentityPropertiesBucket sets the S3 bucket to identity properties. -func SetIdentityPropertiesBucket(ip *smithy.Properties, bucket string) { - ip.Set(bucketKey{}, bucket) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go deleted file mode 100644 index 545e5b220d54..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go +++ /dev/null @@ -1,109 +0,0 @@ -package customizations - -import ( - "context" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/smithy-go/middleware" -) - -const ( - s3ExpressSignerVersion = "com.amazonaws.s3#sigv4express" - headerAmzSessionToken = "x-amz-s3session-token" -) - -// adapts a v4 signer for S3Express -type s3ExpressSignerAdapter struct { - v4 v4.HTTPSigner -} - -// SignHTTP performs S3Express signing on a request, which is identical to -// SigV4 signing save for an additional header containing the S3Express -// session token. -func (s *s3ExpressSignerAdapter) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error { - r.Header.Set(headerAmzSessionToken, credentials.SessionToken) - optFns = append(optFns, func(o *v4.SignerOptions) { - o.DisableSessionToken = true - }) - return s.v4.SignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) -} - -// adapts S3ExpressCredentialsProvider to the standard AWS -// CredentialsProvider interface -type s3ExpressCredentialsAdapter struct { - provider S3ExpressCredentialsProvider - bucket string -} - -func (c *s3ExpressCredentialsAdapter) Retrieve(ctx context.Context) (aws.Credentials, error) { - return c.provider.Retrieve(ctx, c.bucket) -} - -// S3ExpressSignHTTPRequestMiddleware signs S3 S3Express requests. -// -// This is NOT mutually exclusive with existing v4 or v4a signer handling on -// the stack itself, but only one handler will actually perform signing based -// on the provided signing version in the context. -type S3ExpressSignHTTPRequestMiddleware struct { - Credentials S3ExpressCredentialsProvider - Signer v4.HTTPSigner - LogSigning bool -} - -// ID identifies S3ExpressSignHTTPRequestMiddleware. -func (*S3ExpressSignHTTPRequestMiddleware) ID() string { - return "S3ExpressSigning" -} - -// HandleFinalize will sign the request if the S3Express signer has been -// selected. -func (m *S3ExpressSignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetSignerVersion(ctx) != s3ExpressSignerVersion { - return next.HandleFinalize(ctx, in) - } - - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: m.credentialsAdapter(ctx), - Signer: m.signerAdapter(), - LogSigning: m.LogSigning, - }) - return mw.HandleFinalize(ctx, in, next) -} - -func (m *S3ExpressSignHTTPRequestMiddleware) credentialsAdapter(ctx context.Context) aws.CredentialsProvider { - return &s3ExpressCredentialsAdapter{ - provider: m.Credentials, - bucket: GetBucket(ctx), - } -} - -func (m *S3ExpressSignHTTPRequestMiddleware) signerAdapter() v4.HTTPSigner { - return &s3ExpressSignerAdapter{v4: m.Signer} -} - -type s3ExpressPresignerAdapter struct { - v4 v4.HTTPPresigner -} - -// SignHTTP performs S3Express signing on a request, which is identical to -// SigV4 signing save for an additional header containing the S3Express -// session token. -func (s *s3ExpressPresignerAdapter) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) ( - string, http.Header, error, -) { - r.Header.Set(headerAmzSessionToken, credentials.SessionToken) - optFns = append(optFns, func(o *v4.SignerOptions) { - o.DisableSessionToken = true - }) - return s.v4.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) -} - -var ( - _ aws.CredentialsProvider = &s3ExpressCredentialsAdapter{} - _ v4.HTTPSigner = &s3ExpressSignerAdapter{} -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go deleted file mode 100644 index e3ec7f011067..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go +++ /dev/null @@ -1,61 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ExpressSigner signs requests for the sigv4-s3express auth scheme. -// -// This signer respects the aws.auth#sigv4 properties for signing name and -// region. -type ExpressSigner struct { - Signer v4.HTTPSigner - Logger logging.Logger - LogSigning bool -} - -var _ (smithyhttp.Signer) = (*ExpressSigner)(nil) - -// SignRequest signs the request with the provided identity. -func (v *ExpressSigner) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { - ca, ok := identity.(*internalauthsmithy.CredentialsAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - name, ok := smithyhttp.GetSigV4SigningName(&props) - if !ok { - return fmt.Errorf("sigv4 signing name is required for s3express variant") - } - - region, ok := smithyhttp.GetSigV4SigningRegion(&props) - if !ok { - return fmt.Errorf("sigv4 signing region is required for s3express variant") - } - - hash := v4.GetPayloadHash(ctx) - - r.Header.Set(headerAmzSessionToken, ca.Credentials.SessionToken) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { - o.DisableSessionToken = true - - o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) - - o.Logger = v.Logger - o.LogSigning = v.LogSigning - }) - if err != nil { - return fmt.Errorf("sign http: %v", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go deleted file mode 100644 index 2b11b1fa278a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go +++ /dev/null @@ -1,74 +0,0 @@ -package customizations - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "strings" - - "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// HandleResponseErrorWith200Status check for S3 200 error response. -// If an s3 200 error is found, status code for the response is modified temporarily to -// 5xx response status code. -func HandleResponseErrorWith200Status(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&processResponseFor200ErrorMiddleware{}, "OperationDeserializer", middleware.After) -} - -// middleware to process raw response and look for error response with 200 status code -type processResponseFor200ErrorMiddleware struct{} - -// ID returns the middleware ID. -func (*processResponseFor200ErrorMiddleware) ID() string { - return "S3:ProcessResponseFor200Error" -} - -func (m *processResponseFor200ErrorMiddleware) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - // check if response status code is 2xx. - if response.StatusCode < 200 || response.StatusCode >= 300 { - return - } - - var readBuff bytes.Buffer - body := io.TeeReader(response.Body, &readBuff) - - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("received empty response payload"), - } - } - - // rewind response body - response.Body = ioutil.NopCloser(io.MultiReader(&readBuff, response.Body)) - - // if start tag is "Error", the response is consider error response. - if strings.EqualFold(t.Name.Local, "Error") { - // according to https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/ - // 200 error responses are similar to 5xx errors. - response.StatusCode = 500 - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go deleted file mode 100644 index 87f7a22327d0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go +++ /dev/null @@ -1,22 +0,0 @@ -package customizations - -import ( - "github.com/aws/smithy-go/transport/http" - "strings" -) - -func updateS3HostForS3AccessPoint(req *http.Request) { - updateHostPrefix(req, "s3", s3AccessPoint) -} - -func updateS3HostForS3ObjectLambda(req *http.Request) { - updateHostPrefix(req, "s3", s3ObjectLambda) -} - -func updateHostPrefix(req *http.Request, oldEndpointPrefix, newEndpointPrefix string) { - host := req.URL.Host - if strings.HasPrefix(host, oldEndpointPrefix) { - // For example if oldEndpointPrefix=s3 would replace to newEndpointPrefix - req.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go deleted file mode 100644 index f4bbb4b6de1e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go +++ /dev/null @@ -1,49 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "strconv" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddExpiresOnPresignedURL represents a build middleware used to assign -// expiration on a presigned URL. -type AddExpiresOnPresignedURL struct { - - // Expires is time.Duration within which presigned url should be expired. - // This should be the duration in seconds the presigned URL should be considered valid for. - // By default the S3 presigned url expires in 15 minutes ie. 900 seconds. - Expires time.Duration -} - -// ID representing the middleware -func (*AddExpiresOnPresignedURL) ID() string { - return "S3:AddExpiresOnPresignedURL" -} - -// HandleBuild handles the build step middleware behavior -func (m *AddExpiresOnPresignedURL) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - // if expiration is unset skip this middleware - if m.Expires == 0 { - // default to 15 * time.Minutes - m.Expires = 15 * time.Minute - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - // set S3 X-AMZ-Expires header - query := req.URL.Query() - query.Set("X-Amz-Expires", strconv.FormatInt(int64(m.Expires/time.Second), 10)) - req.URL.RawQuery = query.Encode() - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go deleted file mode 100644 index bbc971f2a2fe..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go +++ /dev/null @@ -1,568 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "net/url" - "strings" - - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" - s3arn "github.com/aws/aws-sdk-go-v2/service/s3/internal/arn" - "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" -) - -const ( - s3AccessPoint = "s3-accesspoint" - s3ObjectLambda = "s3-object-lambda" -) - -// processARNResource is used to process an ARN resource. -type processARNResource struct { - - // UseARNRegion indicates if region parsed from an ARN should be used. - UseARNRegion bool - - // UseAccelerate indicates if s3 transfer acceleration is enabled - UseAccelerate bool - - // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver - EndpointResolver EndpointResolver - - // EndpointResolverOptions used by endpoint resolver - EndpointResolverOptions EndpointResolverOptions - - // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled - DisableMultiRegionAccessPoints bool -} - -// ID returns the middleware ID. -func (*processARNResource) ID() string { return "S3:ProcessARNResource" } - -func (m *processARNResource) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - // check if arn was provided, if not skip this middleware - arnValue, ok := s3shared.GetARNResourceFromContext(ctx) - if !ok { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // parse arn into an endpoint arn wrt to service - resource, err := s3arn.ParseEndpointARN(arnValue) - if err != nil { - return out, metadata, err - } - - // build a resource request struct - resourceRequest := s3shared.ResourceRequest{ - Resource: resource, - UseARNRegion: m.UseARNRegion, - UseFIPS: m.EndpointResolverOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled, - RequestRegion: awsmiddleware.GetRegion(ctx), - SigningRegion: awsmiddleware.GetSigningRegion(ctx), - PartitionID: awsmiddleware.GetPartitionID(ctx), - } - - // switch to correct endpoint updater - switch tv := resource.(type) { - case arn.AccessPointARN: - // multi-region arns do not need to validate for cross partition request - if len(tv.Region) != 0 { - // validate resource request - if err := validateRegionForResourceRequest(resourceRequest); err != nil { - return out, metadata, err - } - } - - // Special handling for region-less ap-arns. - if len(tv.Region) == 0 { - // check if multi-region arn support is disabled - if m.DisableMultiRegionAccessPoints { - return out, metadata, fmt.Errorf("Invalid configuration, Multi-Region access point ARNs are disabled") - } - - // Do not allow dual-stack configuration with multi-region arns. - if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - } - - // check if accelerate - if m.UseAccelerate { - return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // fetch arn region to resolve request - resolveRegion := tv.Region - // check if request region is FIPS - if resourceRequest.UseFIPS && len(resolveRegion) == 0 { - // Do not allow Fips support within multi-region arns. - return out, metadata, s3shared.NewClientConfiguredForFIPSError( - tv, resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - var requestBuilder func(context.Context, accesspointOptions) (context.Context, error) - if len(resolveRegion) == 0 { - requestBuilder = buildMultiRegionAccessPointsRequest - } else { - requestBuilder = buildAccessPointRequest - } - - // build request as per accesspoint builder - ctx, err = requestBuilder(ctx, accesspointOptions{ - processARNResource: *m, - request: req, - resource: tv, - resolveRegion: resolveRegion, - partitionID: resourceRequest.PartitionID, - requestRegion: resourceRequest.RequestRegion, - }) - if err != nil { - return out, metadata, err - } - - case arn.S3ObjectLambdaAccessPointARN: - // validate region for resource request - if err := validateRegionForResourceRequest(resourceRequest); err != nil { - return out, metadata, err - } - - // check if accelerate - if m.UseAccelerate { - return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if dualstack - if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // fetch arn region to resolve request - resolveRegion := tv.Region - - // build access point request - ctx, err = buildS3ObjectLambdaAccessPointRequest(ctx, accesspointOptions{ - processARNResource: *m, - request: req, - resource: tv.AccessPointARN, - resolveRegion: resolveRegion, - partitionID: resourceRequest.PartitionID, - requestRegion: resourceRequest.RequestRegion, - }) - if err != nil { - return out, metadata, err - } - - // process outpost accesspoint ARN - case arn.OutpostAccessPointARN: - // validate region for resource request - if err := validateRegionForResourceRequest(resourceRequest); err != nil { - return out, metadata, err - } - - // check if accelerate - if m.UseAccelerate { - return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if dual stack - if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if request region is FIPS - if resourceRequest.UseFIPS { - return out, metadata, s3shared.NewFIPSConfigurationError(tv, resourceRequest.PartitionID, - resourceRequest.RequestRegion, nil) - } - - // build outpost access point request - ctx, err = buildOutpostAccessPointRequest(ctx, outpostAccessPointOptions{ - processARNResource: *m, - resource: tv, - request: req, - partitionID: resourceRequest.PartitionID, - requestRegion: resourceRequest.RequestRegion, - }) - if err != nil { - return out, metadata, err - } - - default: - return out, metadata, s3shared.NewInvalidARNError(resource, nil) - } - - return next.HandleSerialize(ctx, in) -} - -// validate if s3 resource and request region config is compatible. -func validateRegionForResourceRequest(resourceRequest s3shared.ResourceRequest) error { - // check if resourceRequest leads to a cross partition error - v, err := resourceRequest.IsCrossPartition() - if err != nil { - return err - } - if v { - // if cross partition - return s3shared.NewClientPartitionMismatchError(resourceRequest.Resource, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if resourceRequest leads to a cross region error - if !resourceRequest.AllowCrossRegion() && resourceRequest.IsCrossRegion() { - // if cross region, but not use ARN region is not enabled - return s3shared.NewClientRegionMismatchError(resourceRequest.Resource, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - return nil -} - -// === Accesspoint ========== - -type accesspointOptions struct { - processARNResource - request *http.Request - resource arn.AccessPointARN - resolveRegion string - partitionID string - requestRegion string -} - -func buildAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { - tv := options.resource - req := options.request - resolveRegion := options.resolveRegion - - resolveService := tv.Service - - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - ero.ResolvedRegion = "" // clear endpoint option's resolved region so that we resolve using the passed in region - - // resolve endpoint - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } - - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - // Must sign with s3-object-lambda - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - if len(endpoint.SigningRegion) != 0 { - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) - } - - // update serviceID to "s3-accesspoint" - ctx = awsmiddleware.SetServiceID(ctx, s3AccessPoint) - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - // skip arn processing, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - updateS3HostForS3AccessPoint(req) - - ctx, err = buildAccessPointHostPrefix(ctx, req, tv) - if err != nil { - return ctx, err - } - - return ctx, nil -} - -func buildS3ObjectLambdaAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { - tv := options.resource - req := options.request - resolveRegion := options.resolveRegion - - resolveService := tv.Service - - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - ero.ResolvedRegion = "" // clear endpoint options resolved region so we resolve the passed in region - - // resolve endpoint - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } - - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - // Must sign with s3-object-lambda - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - if len(endpoint.SigningRegion) != 0 { - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) - } - - // update serviceID to "s3-object-lambda" - ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda) - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - // skip arn processing, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - if endpoint.Source == aws.EndpointSourceServiceMetadata { - updateS3HostForS3ObjectLambda(req) - } - - ctx, err = buildAccessPointHostPrefix(ctx, req, tv) - if err != nil { - return ctx, err - } - - return ctx, nil -} - -func buildMultiRegionAccessPointsRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { - const s3GlobalLabel = "s3-global." - const accesspointLabel = "accesspoint." - - tv := options.resource - req := options.request - resolveService := tv.Service - resolveRegion := options.requestRegion - arnPartition := tv.Partition - - // resolve endpoint - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } - - // set signing region and version for MRAP - endpoint.SigningRegion = "*" - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = SetSignerVersion(ctx, v4a.Version) - - if len(endpoint.SigningName) != 0 { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - // skip arn processing, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - // modify endpoint host to use s3-global host prefix - scheme := strings.SplitN(endpoint.URL, "://", 2) - dnsSuffix, err := endpoints.GetDNSSuffix(arnPartition, ero) - if err != nil { - return ctx, fmt.Errorf("Error determining dns suffix from arn partition, %w", err) - } - // set url as per partition - endpoint.URL = scheme[0] + "://" + s3GlobalLabel + dnsSuffix - - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - // build access point host prefix - accessPointHostPrefix := tv.AccessPointName + "." + accesspointLabel - - // add host prefix to url - req.URL.Host = accessPointHostPrefix + req.URL.Host - if len(req.Host) > 0 { - req.Host = accessPointHostPrefix + req.Host - } - - // validate the endpoint host - if err := http.ValidateEndpointHost(req.URL.Host); err != nil { - return ctx, fmt.Errorf("endpoint validation error: %w, when using arn %v", err, tv) - } - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - return ctx, nil -} - -func buildAccessPointHostPrefix(ctx context.Context, req *http.Request, tv arn.AccessPointARN) (context.Context, error) { - // add host prefix for access point - accessPointHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." - req.URL.Host = accessPointHostPrefix + req.URL.Host - if len(req.Host) > 0 { - req.Host = accessPointHostPrefix + req.Host - } - - // validate the endpoint host - if err := http.ValidateEndpointHost(req.URL.Host); err != nil { - return ctx, s3shared.NewInvalidARNError(tv, err) - } - - return ctx, nil -} - -// ====== Outpost Accesspoint ======== - -type outpostAccessPointOptions struct { - processARNResource - request *http.Request - resource arn.OutpostAccessPointARN - partitionID string - requestRegion string -} - -func buildOutpostAccessPointRequest(ctx context.Context, options outpostAccessPointOptions) (context.Context, error) { - tv := options.resource - req := options.request - - resolveRegion := tv.Region - resolveService := tv.Service - endpointsID := resolveService - if strings.EqualFold(resolveService, "s3-outposts") { - // assign endpoints ID as "S3" - endpointsID = "s3" - } - - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - ero.ResolvedRegion = "" - - // resolve regional endpoint for resolved region. - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } - - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - // assign resolved service from arn as signing name - if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - if len(endpoint.SigningRegion) != 0 { - // redirect signer to use resolved endpoint signing name and region - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) - } - - // update serviceID to resolved service id - ctx = awsmiddleware.SetServiceID(ctx, resolveService) - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - // skip further customizations, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - updateHostPrefix(req, endpointsID, resolveService) - - // add host prefix for s3-outposts - outpostAPHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + tv.OutpostID + "." - req.URL.Host = outpostAPHostPrefix + req.URL.Host - if len(req.Host) > 0 { - req.Host = outpostAPHostPrefix + req.Host - } - - // validate the endpoint host - if err := http.ValidateEndpointHost(req.URL.Host); err != nil { - return ctx, s3shared.NewInvalidARNError(tv, err) - } - - return ctx, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go deleted file mode 100644 index cf3f4dc8b65a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go +++ /dev/null @@ -1,63 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" -) - -// removeBucketFromPathMiddleware needs to be executed after serialize step is performed -type removeBucketFromPathMiddleware struct { -} - -func (m *removeBucketFromPathMiddleware) ID() string { - return "S3:RemoveBucketFromPathMiddleware" -} - -func (m *removeBucketFromPathMiddleware) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - // check if a bucket removal from HTTP path is required - bucket, ok := getRemoveBucketFromPath(ctx) - if !ok { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - removeBucketFromPath(req.URL, bucket) - return next.HandleSerialize(ctx, in) -} - -type removeBucketKey struct { - bucket string -} - -// setBucketToRemoveOnContext sets the bucket name to be removed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setBucketToRemoveOnContext(ctx context.Context, bucket string) context.Context { - return middleware.WithStackValue(ctx, removeBucketKey{}, bucket) -} - -// getRemoveBucketFromPath returns the bucket name to remove from the path. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func getRemoveBucketFromPath(ctx context.Context) (string, bool) { - v, ok := middleware.GetStackValue(ctx, removeBucketKey{}).(string) - return v, ok -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go deleted file mode 100644 index 6e1d447243ce..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go +++ /dev/null @@ -1,88 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - "net/url" -) - -type s3ObjectLambdaEndpoint struct { - // whether the operation should use the s3-object-lambda endpoint - UseEndpoint bool - - // use transfer acceleration - UseAccelerate bool - - EndpointResolver EndpointResolver - EndpointResolverOptions EndpointResolverOptions -} - -func (t *s3ObjectLambdaEndpoint) ID() string { - return "S3:ObjectLambdaEndpoint" -} - -func (t *s3ObjectLambdaEndpoint) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - if !t.UseEndpoint { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) - } - - if t.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, fmt.Errorf("client configured for dualstack but not supported for operation") - } - - if t.UseAccelerate { - return out, metadata, fmt.Errorf("client configured for accelerate but not supported for operation") - } - - region := awsmiddleware.GetRegion(ctx) - - ero := t.EndpointResolverOptions - - endpoint, err := t.EndpointResolver.ResolveEndpoint(region, ero) - if err != nil { - return out, metadata, err - } - - // Set the ServiceID and SigningName - ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda) - - if len(endpoint.SigningName) > 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - ctx = awsmiddleware.SetSigningName(ctx, s3ObjectLambda) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, err - } - - if len(endpoint.SigningRegion) > 0 { - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, region) - } - - if endpoint.Source == aws.EndpointSourceServiceMetadata || !endpoint.HostnameImmutable { - updateS3HostForS3ObjectLambda(req) - } - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go deleted file mode 100644 index 756823cb7588..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go +++ /dev/null @@ -1,227 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - "github.com/aws/smithy-go/middleware" -) - -type signerVersionKey struct{} - -// GetSignerVersion retrieves the signer version to use for signing -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetSignerVersion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signerVersionKey{}).(string) - return v -} - -// SetSignerVersion sets the signer version to be used for signing the request -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetSignerVersion(ctx context.Context, version string) context.Context { - return middleware.WithStackValue(ctx, signerVersionKey{}, version) -} - -// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. -type SignHTTPRequestMiddlewareOptions struct { - - // credential provider - CredentialsProvider aws.CredentialsProvider - - // log signing - LogSigning bool - - // v4 signer - V4Signer v4.HTTPSigner - - //v4a signer - V4aSigner v4a.HTTPSigner -} - -// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests -func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { - return &SignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - v4Signer: options.V4Signer, - v4aSigner: options.V4aSigner, - logSigning: options.LogSigning, - } -} - -// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation to select HTTP Signing method -type SignHTTPRequestMiddleware struct { - - // credential provider - credentialsProvider aws.CredentialsProvider - - // log signing - logSigning bool - - // v4 signer - v4Signer v4.HTTPSigner - - //v4a signer - v4aSigner v4a.HTTPSigner -} - -// ID is the SignHTTPRequestMiddleware identifier -func (s *SignHTTPRequestMiddleware) ID() string { - return "Signing" -} - -// HandleFinalize will take the provided input and handle signing for either -// SigV4 or SigV4A as called for. -func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - sv := GetSignerVersion(ctx) - - if strings.EqualFold(sv, v4.Version) { - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: s.credentialsProvider, - Signer: s.v4Signer, - LogSigning: s.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - } else if strings.EqualFold(sv, v4a.Version) { - v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider) - if !ok { - return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer") - } - - mw := v4a.NewSignHTTPRequestMiddleware(v4a.SignHTTPRequestMiddlewareOptions{ - Credentials: v4aCredentialProvider, - Signer: s.v4aSigner, - LogSigning: s.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - } - - return next.HandleFinalize(ctx, in) -} - -// RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already -// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the -// finalize step. -func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) { - const signedID = "Signing" - _, present := stack.Finalize.Get(signedID) - if present { - _, err = stack.Finalize.Swap(signedID, signingMiddleware) - } else { - err = stack.Finalize.Add(signingMiddleware, middleware.After) - } - return err -} - -// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. -type PresignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - ExpressCredentials S3ExpressCredentialsProvider - V4Presigner v4.HTTPPresigner - V4aPresigner v4a.HTTPPresigner - LogSigning bool -} - -// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a -// presigned URL for an HTTP request. -// -// Will short circuit the middleware stack and not forward onto the next -// Finalize handler. -type PresignHTTPRequestMiddleware struct { - - // cred provider and signer for sigv4 - credentialsProvider aws.CredentialsProvider - - // s3Express credentials - expressCredentials S3ExpressCredentialsProvider - - // sigV4 signer - v4Signer v4.HTTPPresigner - - // sigV4a signer - v4aSigner v4a.HTTPPresigner - - // log signing - logSigning bool -} - -// NewPresignHTTPRequestMiddleware constructs a PresignHTTPRequestMiddleware using the given Signer for signing requests -func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { - return &PresignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - expressCredentials: options.ExpressCredentials, - v4Signer: options.V4Presigner, - v4aSigner: options.V4aPresigner, - logSigning: options.LogSigning, - } -} - -// ID provides the middleware ID. -func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 or SigV4a presign authentication scheme. -// -// Since the signed request is not a valid HTTP request -func (p *PresignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - // fetch signer type from context - signerVersion := GetSignerVersion(ctx) - - switch signerVersion { - case "aws.auth#sigv4a": - mw := v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: &v4a.SymmetricCredentialAdaptor{ - SymmetricProvider: p.credentialsProvider, - }, - Presigner: p.v4aSigner, - LogSigning: p.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - case "aws.auth#sigv4": - mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: p.credentialsProvider, - Presigner: p.v4Signer, - LogSigning: p.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - case s3ExpressSignerVersion: - mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: &s3ExpressCredentialsAdapter{ - provider: p.expressCredentials, - bucket: GetBucket(ctx), - }, - Presigner: &s3ExpressPresignerAdapter{v4: p.v4Signer}, - LogSigning: p.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - default: - return out, metadata, fmt.Errorf("unsupported signer type \"%s\"", signerVersion) - } -} - -// RegisterPreSigningMiddleware registers the wrapper pre-signing middleware to the stack. If a pre-signing middleware is already -// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the -// finalize step. -func RegisterPreSigningMiddleware(stack *middleware.Stack, signingMiddleware *PresignHTTPRequestMiddleware) (err error) { - const signedID = "PresignHTTPRequest" - _, present := stack.Finalize.Get(signedID) - if present { - _, err = stack.Finalize.Swap(signedID, signingMiddleware) - } else { - err = stack.Finalize.Add(signingMiddleware, middleware.After) - } - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go deleted file mode 100644 index eedfc7eefa4d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go +++ /dev/null @@ -1,310 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "log" - "net/url" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// UpdateEndpointParameterAccessor represents accessor functions used by the middleware -type UpdateEndpointParameterAccessor struct { - // functional pointer to fetch bucket name from provided input. - // The function is intended to take an input value, and - // return a string pointer to value of string, and bool if - // input has no bucket member. - GetBucketFromInput func(interface{}) (*string, bool) -} - -// UpdateEndpointOptions provides the options for the UpdateEndpoint middleware setup. -type UpdateEndpointOptions struct { - // Accessor are parameter accessors used by the middleware - Accessor UpdateEndpointParameterAccessor - - // use path style - UsePathStyle bool - - // use transfer acceleration - UseAccelerate bool - - // indicates if an operation supports s3 transfer acceleration. - SupportsAccelerate bool - - // use ARN region - UseARNRegion bool - - // Indicates that the operation should target the s3-object-lambda endpoint. - // Used to direct operations that do not route based on an input ARN. - TargetS3ObjectLambda bool - - // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver - EndpointResolver EndpointResolver - - // EndpointResolverOptions used by endpoint resolver - EndpointResolverOptions EndpointResolverOptions - - // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled - DisableMultiRegionAccessPoints bool -} - -// UpdateEndpoint adds the middleware to the middleware stack based on the UpdateEndpointOptions. -func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err error) { - const serializerID = "OperationSerializer" - - // initial arn look up middleware - err = stack.Initialize.Insert(&s3shared.ARNLookup{ - GetARNValue: options.Accessor.GetBucketFromInput, - }, "legacyEndpointContextSetter", middleware.After) - if err != nil { - return err - } - - // process arn - err = stack.Serialize.Insert(&processARNResource{ - UseARNRegion: options.UseARNRegion, - UseAccelerate: options.UseAccelerate, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointResolverOptions, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }, serializerID, middleware.Before) - if err != nil { - return err - } - - // process whether the operation requires the s3-object-lambda endpoint - // Occurs before operation serializer so that hostPrefix mutations - // can be handled correctly. - err = stack.Serialize.Insert(&s3ObjectLambdaEndpoint{ - UseEndpoint: options.TargetS3ObjectLambda, - UseAccelerate: options.UseAccelerate, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointResolverOptions, - }, serializerID, middleware.Before) - if err != nil { - return err - } - - // remove bucket arn middleware - err = stack.Serialize.Insert(&removeBucketFromPathMiddleware{}, serializerID, middleware.After) - if err != nil { - return err - } - - // update endpoint to use options for path style and accelerate - err = stack.Serialize.Insert(&updateEndpoint{ - usePathStyle: options.UsePathStyle, - getBucketFromInput: options.Accessor.GetBucketFromInput, - useAccelerate: options.UseAccelerate, - supportsAccelerate: options.SupportsAccelerate, - }, serializerID, middleware.After) - if err != nil { - return err - } - - return err -} - -type updateEndpoint struct { - // path style options - usePathStyle bool - getBucketFromInput func(interface{}) (*string, bool) - - // accelerate options - useAccelerate bool - supportsAccelerate bool -} - -// ID returns the middleware ID. -func (*updateEndpoint) ID() string { - return "S3:UpdateEndpoint" -} - -func (u *updateEndpoint) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - // if arn was processed, skip this middleware - if _, ok := s3shared.GetARNResourceFromContext(ctx); ok { - return next.HandleSerialize(ctx, in) - } - - // skip this customization if host name is set as immutable - if smithyhttp.GetHostnameImmutable(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // check if accelerate is supported - if u.useAccelerate && !u.supportsAccelerate { - // accelerate is not supported, thus will be ignored - log.Println("Transfer acceleration is not supported for the operation, ignoring UseAccelerate.") - u.useAccelerate = false - } - - // transfer acceleration is not supported with path style urls - if u.useAccelerate && u.usePathStyle { - log.Println("UseAccelerate is not compatible with UsePathStyle, ignoring UsePathStyle.") - u.usePathStyle = false - } - - if u.getBucketFromInput != nil { - // Below customization only apply if bucket name is provided - bucket, ok := u.getBucketFromInput(in.Parameters) - if ok && bucket != nil { - region := awsmiddleware.GetRegion(ctx) - if err := u.updateEndpointFromConfig(req, *bucket, region); err != nil { - return out, metadata, err - } - } - } - - return next.HandleSerialize(ctx, in) -} - -func (u updateEndpoint) updateEndpointFromConfig(req *smithyhttp.Request, bucket string, region string) error { - // do nothing if path style is enforced - if u.usePathStyle { - return nil - } - - if !hostCompatibleBucketName(req.URL, bucket) { - // bucket name must be valid to put into the host for accelerate operations. - // For non-accelerate operations the bucket name can stay in the path if - // not valid hostname. - var err error - if u.useAccelerate { - err = fmt.Errorf("bucket name %s is not compatible with S3", bucket) - } - - // No-Op if not using accelerate. - return err - } - - // accelerate is only supported if use path style is disabled - if u.useAccelerate { - parts := strings.Split(req.URL.Host, ".") - if len(parts) < 3 { - return fmt.Errorf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", req.URL.Host) - } - - if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { - parts[0] = "s3-accelerate" - } - - for i := 1; i+1 < len(parts); i++ { - if strings.EqualFold(parts[i], region) { - parts = append(parts[:i], parts[i+1:]...) - break - } - } - - // construct the url host - req.URL.Host = strings.Join(parts, ".") - } - - // move bucket to follow virtual host style - moveBucketNameToHost(req.URL, bucket) - return nil -} - -// updates endpoint to use virtual host styling -func moveBucketNameToHost(u *url.URL, bucket string) { - u.Host = bucket + "." + u.Host - removeBucketFromPath(u, bucket) -} - -// remove bucket from url -func removeBucketFromPath(u *url.URL, bucket string) { - if strings.HasPrefix(u.Path, "/"+bucket) { - // modify url path - u.Path = strings.Replace(u.Path, "/"+bucket, "", 1) - - // modify url raw path - u.RawPath = strings.Replace(u.RawPath, "/"+httpbinding.EscapePath(bucket, true), "", 1) - } - - if u.Path == "" { - u.Path = "/" - } - - if u.RawPath == "" { - u.RawPath = "/" - } -} - -// hostCompatibleBucketName returns true if the request should -// put the bucket in the host. This is false if the bucket is not -// DNS compatible or the EndpointResolver resolves an aws.Endpoint with -// HostnameImmutable member set to true. -// -// https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Endpoint.HostnameImmutable -func hostCompatibleBucketName(u *url.URL, bucket string) bool { - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. - if u.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) -} - -// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. -// Buckets created outside of the classic region MUST be DNS compatible. -func dnsCompatibleBucketName(bucket string) bool { - if strings.Contains(bucket, "..") { - return false - } - - // checks for `^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$` domain mapping - if !((bucket[0] > 96 && bucket[0] < 123) || (bucket[0] > 47 && bucket[0] < 58)) { - return false - } - - for _, c := range bucket[1:] { - if !((c > 96 && c < 123) || (c > 47 && c < 58) || c == 46 || c == 45) { - return false - } - } - - // checks for `^(\d+\.){3}\d+$` IPaddressing - v := strings.SplitN(bucket, ".", -1) - if len(v) == 4 { - for _, c := range bucket { - if !((c > 47 && c < 58) || c == 46) { - // we confirm that this is not a IP address - return true - } - } - // this is a IP address - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go deleted file mode 100644 index 89faaa4601df..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go +++ /dev/null @@ -1,1138 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" - "strings" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver S3 endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsEusc *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "af-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.af-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-east-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-south-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-5.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-7", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-7", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-7.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "aws-global", - }: endpoints.Endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ca-central-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.ca-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ca-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ca-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.ca-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ca-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-central-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-north-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-north-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-south-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-west-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-3", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-west-3.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "fips-ca-central-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-ca-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.ca-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-east-2", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-west-2", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "il-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.il-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.me-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.me-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "mx-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "mx-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.mx-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "s3-external-1", - }: endpoints.Endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "cn-north-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", - }, - }, - }, - { - ID: "aws-eusc", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsEusc, - IsRegionalized: true, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "fips-us-iso-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-iso-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-iso-east-1", - }: endpoints.Endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-iso-east-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-iso-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", - }, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.sc2s.sgov.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "fips-us-isob-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-isob-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-isob-east-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", - }, - endpoints.EndpointKey{ - Region: "us-isob-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.cloud.adc-e.uk", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "eu-isoe-west-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.csp.hci.ic.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-isof-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-isof-south-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "fips-us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, -} - -// GetDNSSuffix returns the dnsSuffix URL component for the given partition id -func GetDNSSuffix(id string, options Options) (string, error) { - variant := transformToSharedOptions(options).GetEndpointVariant() - switch { - case strings.EqualFold(id, "aws"): - switch variant { - case endpoints.DualStackVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant | endpoints.DualStackVariant: - return "amazonaws.com", nil - - case 0: - return "amazonaws.com", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-cn"): - switch variant { - case endpoints.DualStackVariant: - return "amazonaws.com.cn", nil - - case endpoints.FIPSVariant: - return "amazonaws.com.cn", nil - - case endpoints.FIPSVariant | endpoints.DualStackVariant: - return "api.amazonwebservices.com.cn", nil - - case 0: - return "amazonaws.com.cn", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-eusc"): - switch variant { - case endpoints.FIPSVariant: - return "amazonaws.eu", nil - - case 0: - return "amazonaws.eu", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso"): - switch variant { - case endpoints.FIPSVariant: - return "c2s.ic.gov", nil - - case 0: - return "c2s.ic.gov", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso-b"): - switch variant { - case endpoints.FIPSVariant: - return "sc2s.sgov.gov", nil - - case 0: - return "sc2s.sgov.gov", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso-e"): - switch variant { - case endpoints.FIPSVariant: - return "cloud.adc-e.uk", nil - - case 0: - return "cloud.adc-e.uk", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso-f"): - switch variant { - case endpoints.FIPSVariant: - return "csp.hci.ic.gov", nil - - case 0: - return "csp.hci.ic.gov", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-us-gov"): - switch variant { - case endpoints.DualStackVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant | endpoints.DualStackVariant: - return "amazonaws.com", nil - - case 0: - return "amazonaws.com", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - default: - return "", fmt.Errorf("unknown partition") - - } -} - -// GetDNSSuffixFromRegion returns the DNS suffix for the provided region and -// options. -func GetDNSSuffixFromRegion(region string, options Options) (string, error) { - switch { - case partitionRegexp.Aws.MatchString(region): - return GetDNSSuffix("aws", options) - - case partitionRegexp.AwsCn.MatchString(region): - return GetDNSSuffix("aws-cn", options) - - case partitionRegexp.AwsEusc.MatchString(region): - return GetDNSSuffix("aws-eusc", options) - - case partitionRegexp.AwsIso.MatchString(region): - return GetDNSSuffix("aws-iso", options) - - case partitionRegexp.AwsIsoB.MatchString(region): - return GetDNSSuffix("aws-iso-b", options) - - case partitionRegexp.AwsIsoE.MatchString(region): - return GetDNSSuffix("aws-iso-e", options) - - case partitionRegexp.AwsIsoF.MatchString(region): - return GetDNSSuffix("aws-iso-f", options) - - case partitionRegexp.AwsUsGov.MatchString(region): - return GetDNSSuffix("aws-us-gov", options) - - default: - return GetDNSSuffix("aws", options) - - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go deleted file mode 100644 index 9c1e834e464b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go +++ /dev/null @@ -1,350 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The threshold ContentLength in bytes for HTTP PUT request to receive {Expect: - // 100-continue} header. Setting to -1 will disable adding the Expect header to - // requests; setting to 0 will set the threshold to default 2MB - ContinueHeaderThresholdBytes int64 - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // Disables logging when the client skips output checksum validation due to lack - // of algorithm support. - DisableLogOutputChecksumValidationSkipped bool - - // Allows you to disable S3 Multi-Region access points feature. - DisableMultiRegionAccessPoints bool - - // Disables this client's usage of Session Auth for S3Express buckets and reverts - // to using conventional SigV4 for those. - DisableS3ExpressSessionAuth *bool - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // The credentials provider for S3Express requests. - ExpressCredentials ExpressCredentialsProvider - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // Provides idempotency tokens values that will be automatically populated into - // idempotent API operations. - IdempotencyTokenProvider IdempotencyTokenProvider - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. (Required) - Region string - - // Indicates how user opt-in/out request checksum calculation - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Indicates how user opt-in/out response checksum validation - ResponseChecksumValidation aws.ResponseChecksumValidation - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. - TracerProvider tracing.TracerProvider - - // Allows you to enable arn region support for the service. - UseARNRegion bool - - // Allows you to enable S3 Accelerate feature. All operations compatible with S3 - // Accelerate will use the accelerate endpoint for requests. Requests not - // compatible will fall back to normal S3 requests. The bucket must be enabled for - // accelerate to be used with S3 client with accelerate enabled. If the bucket is - // not enabled for accelerate an error will be returned. The bucket name must be - // DNS compatible to work with accelerate. - UseAccelerate bool - - // Allows you to enable dual-stack endpoint support for the service. - // - // Deprecated: Set dual-stack by setting UseDualStackEndpoint on - // EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint - // field is set it overrides this field value. - UseDualstack bool - - // Allows you to enable the client to use path-style addressing, i.e., - // https://s3.amazonaws.com/BUCKET/KEY . By default, the S3 client will use virtual - // hosted bucket addressing when possible( https://BUCKET.s3.amazonaws.com/KEY ). - UsePathStyle bool - - // Signature Version 4a (SigV4a) Signer - httpSignerV4a httpSignerV4a - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // Client registry of operation interceptors. - Interceptors smithyhttp.InterceptorRegistry - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme - - // Priority list of preferred auth scheme names (e.g. sigv4a). - AuthSchemePreference []string -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - to.Interceptors = o.Interceptors.Copy() - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "com.amazonaws.s3#sigv4express" { - return getExpressIdentityResolver(o) - } - if schemeID == "aws.auth#sigv4a" { - return getSigV4AIdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func getSigV4AIdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &v4a.CredentialsProviderAdapter{ - Provider: &v4a.SymmetricCredentialAdaptor{ - SymmetricProvider: o.Credentials, - }, - } - } - return nil -} - -// WithSigV4ASigningRegions applies an override to the authentication workflow to -// use the given signing region set for SigV4A-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region set from both auth scheme resolution and endpoint -// resolution. -func WithSigV4ASigningRegions(regions []string) func(*Options) { - fn := func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, - ) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, regions) - return next.HandleFinalize(ctx, in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Finalize.Insert( - middleware.FinalizeMiddlewareFunc("withSigV4ASigningRegions", fn), - "Signing", - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} - -func getExpressIdentityResolver(o Options) smithyauth.IdentityResolver { - if o.ExpressCredentials != nil { - return &s3cust.ExpressIdentityResolver{Provider: o.ExpressCredentials} - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go deleted file mode 100644 index 491ed2e5da50..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go +++ /dev/null @@ -1,419 +0,0 @@ -package s3 - -import ( - "context" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const ( - algorithmHeader = "X-Amz-Algorithm" - credentialHeader = "X-Amz-Credential" - dateHeader = "X-Amz-Date" - tokenHeader = "X-Amz-Security-Token" - signatureHeader = "X-Amz-Signature" - - algorithm = "AWS4-HMAC-SHA256" - aws4Request = "aws4_request" - bucketHeader = "bucket" - defaultExpiresIn = 15 * time.Minute - shortDateLayout = "20060102" -) - -// PresignPostObject is a special kind of [presigned request] used to send a request using -// form data, likely from an HTML form on a browser. -// Unlike other presigned operations, the return values of this function are not meant to be used directly -// to make an HTTP request but rather to be used as inputs to a form. See [the docs] for more information -// on how to use these values -// -// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html -// [the docs] https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html -func (c *PresignClient) PresignPostObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignPostOptions)) (*PresignedPostRequest, error) { - if params == nil { - params = &PutObjectInput{} - } - clientOptions := c.options.copy() - options := PresignPostOptions{ - Expires: clientOptions.Expires, - PostPresigner: &postSignAdapter{}, - } - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(clientOptions.ClientOptions, withNopHTTPClientAPIOption) - cvt := presignPostConverter(options) - result, _, err := c.client.invokeOperation(ctx, "$type:L", params, clientOptFns, - c.client.addOperationPutObjectMiddlewares, - cvt.ConvertToPresignMiddleware, - func(stack *middleware.Stack, options Options) error { - return awshttp.RemoveContentTypeHeader(stack) - }, - ) - if err != nil { - return nil, err - } - - out := result.(*PresignedPostRequest) - return out, nil -} - -// PresignedPostRequest represents a presigned request to be sent using HTTP verb POST and FormData -type PresignedPostRequest struct { - // Represents the Base URL to make a request to - URL string - // Values is a key-value map of values to be sent as FormData - // these values are not encoded - Values map[string]string -} - -// postSignAdapter adapter to implement the presignPost interface -type postSignAdapter struct{} - -// PresignPost creates a special kind of [presigned request] -// to be used with HTTP verb POST. -// It differs from PUT request mostly on -// 1. It accepts a new set of parameters, `Conditions[]`, that are used to create a policy doc to limit where an object can be posted to -// 2. The return value needs to have more processing since it's meant to be sent via a form and not stand on its own -// 3. There's no body to be signed, since that will be attached when the actual request is made -// 4. The signature is made based on the policy document, not the whole request -// More information can be found at https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html -// -// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html -func (s *postSignAdapter) PresignPost( - credentials aws.Credentials, - bucket string, key string, - region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, optFns ...func(*v4.SignerOptions), -) (fields map[string]string, err error) { - credentialScope := buildCredentialScope(signingTime, region, service) - credentialStr := credentials.AccessKeyID + "/" + credentialScope - - policyDoc, err := createPolicyDocument(expirationTime, signingTime, bucket, key, credentialStr, &credentials.SessionToken, conditions) - if err != nil { - return nil, err - } - - signature := buildSignature(policyDoc, credentials.SecretAccessKey, service, region, signingTime) - - fields = getPostSignRequiredFields(signingTime, credentialStr, credentials) - fields[signatureHeader] = signature - fields["key"] = key - fields["policy"] = policyDoc - - return fields, nil -} - -func getPostSignRequiredFields(t time.Time, credentialStr string, awsCredentials aws.Credentials) map[string]string { - fields := map[string]string{ - algorithmHeader: algorithm, - dateHeader: t.UTC().Format("20060102T150405Z"), - credentialHeader: credentialStr, - } - - sessionToken := awsCredentials.SessionToken - if len(sessionToken) > 0 { - fields[tokenHeader] = sessionToken - } - - return fields -} - -// PresignPost defines the interface to presign a POST request -type PresignPost interface { - PresignPost( - credentials aws.Credentials, - bucket string, key string, - region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, - optFns ...func(*v4.SignerOptions), - ) (fields map[string]string, err error) -} - -// PresignPostOptions represent the options to be passed to a PresignPost sign request -type PresignPostOptions struct { - - // ClientOptions are list of functional options to mutate client options used by - // the presign client. - ClientOptions []func(*Options) - - // PostPresigner to use. One will be created if none is provided - PostPresigner PresignPost - - // Expires sets the expiration duration for the generated presign url. This should - // be the duration in seconds the presigned URL should be considered valid for. If - // not set or set to zero, presign url would default to expire after 900 seconds. - Expires time.Duration - - // Conditions a list of extra conditions to pass to the policy document - // Available conditions can be found [here] - // - // [here]https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions - Conditions []interface{} -} - -type presignPostConverter PresignPostOptions - -// presignPostRequestMiddlewareOptions is the options for the presignPostRequestMiddleware middleware. -type presignPostRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Presigner PresignPost - LogSigning bool - ExpiresIn time.Duration - Conditions []interface{} -} - -type presignPostRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - presigner PresignPost - logSigning bool - expiresIn time.Duration - conditions []interface{} -} - -// newPresignPostRequestMiddleware returns a new presignPostRequestMiddleware -// initialized with the presigner. -func newPresignPostRequestMiddleware(options presignPostRequestMiddlewareOptions) *presignPostRequestMiddleware { - return &presignPostRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - presigner: options.Presigner, - logSigning: options.LogSigning, - expiresIn: options.ExpiresIn, - conditions: options.Conditions, - } -} - -// ID provides the middleware ID. -func (*presignPostRequestMiddleware) ID() string { return "PresignPostRequestMiddleware" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 presign authentication scheme. -// -// Since the signed request is not a valid HTTP request -func (s *presignPostRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - - input := getOperationInput(ctx) - asS3Put, ok := input.(*PutObjectInput) - if !ok { - return out, metadata, fmt.Errorf("expected PutObjectInput") - } - bucketName, ok := asS3Put.bucket() - if !ok { - return out, metadata, fmt.Errorf("requested input bucketName not found on request") - } - uploadKey := asS3Put.Key - if uploadKey == nil { - return out, metadata, fmt.Errorf("PutObject input does not have a key input") - } - - uri := getS3ResolvedURI(ctx) - - signingName := awsmiddleware.GetSigningName(ctx) - signingRegion := awsmiddleware.GetSigningRegion(ctx) - - credentials, err := s.credentialsProvider.Retrieve(ctx) - if err != nil { - return out, metadata, &v4.SigningError{ - Err: fmt.Errorf("failed to retrieve credentials: %w", err), - } - } - skew := internalcontext.GetAttemptSkewContext(ctx) - signingTime := sdk.NowTime().Add(skew) - expirationTime := signingTime.Add(s.expiresIn).UTC() - - fields, err := s.presigner.PresignPost( - credentials, - bucketName, - *uploadKey, - signingRegion, - signingName, - signingTime, - s.conditions, - expirationTime, - func(o *v4.SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) - if err != nil { - return out, metadata, &v4.SigningError{ - Err: fmt.Errorf("failed to sign http request, %w", err), - } - } - - out.Result = &PresignedPostRequest{ - URL: uri, - Values: fields, - } - - return out, metadata, nil -} - -// Adapted from existing PresignConverter middleware -func (c presignPostConverter) ConvertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - stack.Build.Remove("UserAgent") - stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) - stack.Finalize.Remove((*retry.Attempt)(nil).ID()) - stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) - stack.Deserialize.Clear() - - if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { - return err - } - - // if no expiration is set, set one - expiresIn := c.Expires - if expiresIn == 0 { - expiresIn = defaultExpiresIn - } - - pmw := newPresignPostRequestMiddleware(presignPostRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - Presigner: c.PostPresigner, - LogSigning: options.ClientLogMode.IsSigning(), - ExpiresIn: expiresIn, - Conditions: c.Conditions, - }) - if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { - return err - } - if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { - return err - } - err = presignedurlcust.AddAsIsPresigningMiddleware(stack) - if err != nil { - return err - } - return nil -} - -func createPolicyDocument(expirationTime time.Time, signingTime time.Time, bucket string, key string, credentialString string, securityToken *string, extraConditions []interface{}) (string, error) { - initialConditions := []interface{}{ - map[string]string{ - algorithmHeader: algorithm, - }, - map[string]string{ - bucketHeader: bucket, - }, - map[string]string{ - credentialHeader: credentialString, - }, - map[string]string{ - dateHeader: signingTime.UTC().Format("20060102T150405Z"), - }, - } - - var conditions []interface{} - for _, v := range initialConditions { - conditions = append(conditions, v) - } - - if securityToken != nil && *securityToken != "" { - conditions = append(conditions, map[string]string{ - tokenHeader: *securityToken, - }) - } - - // append user-defined conditions at the end - conditions = append(conditions, extraConditions...) - - // The policy allows you to set a "key" value to specify what's the name of the - // key to add. Customers can add one by specifying one in their conditions, - // so we're checking if one has already been set. - // If none is found, restrict this to just the key name passed on the request - // This can be disabled by adding a condition that explicitly allows - // everything - if !isAlreadyCheckingForKey(conditions) { - conditions = append(conditions, map[string]string{"key": key}) - } - - policyDoc := map[string]interface{}{ - "conditions": conditions, - "expiration": expirationTime.Format(time.RFC3339), - } - - jsonBytes, err := json.Marshal(policyDoc) - if err != nil { - return "", err - } - - return base64.StdEncoding.EncodeToString(jsonBytes), nil -} - -func isAlreadyCheckingForKey(conditions []interface{}) bool { - // Need to check for two conditions: - // 1. A condition of the form ["starts-with", "$key", "mykey"] - // 2. A condition of the form {"key": "mykey"} - for _, c := range conditions { - slice, ok := c.([]interface{}) - if ok && len(slice) > 1 { - if slice[0] == "starts-with" && slice[1] == "$key" { - return true - } - } - m, ok := c.(map[string]interface{}) - if ok && len(m) > 0 { - for k := range m { - if k == "key" { - return true - } - } - } - // Repeat this but for map[string]string due to type constrains - ms, ok := c.(map[string]string) - if ok && len(ms) > 0 { - for k := range ms { - if k == "key" { - return true - } - } - } - } - return false -} - -// these methods have been copied from v4 implementation since they are not exported for public use -func hmacsha256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func buildSignature(strToSign, secret, service, region string, t time.Time) string { - key := deriveKey(secret, service, region, t) - return hex.EncodeToString(hmacsha256(key, []byte(strToSign))) -} - -func deriveKey(secret, service, region string, t time.Time) []byte { - hmacDate := hmacsha256([]byte("AWS4"+secret), []byte(t.UTC().Format(shortDateLayout))) - hmacRegion := hmacsha256(hmacDate, []byte(region)) - hmacService := hmacsha256(hmacRegion, []byte(service)) - return hmacsha256(hmacService, []byte(aws4Request)) -} - -func buildCredentialScope(signingTime time.Time, region, service string) string { - return strings.Join([]string{ - signingTime.UTC().Format(shortDateLayout), - region, - service, - aws4Request, - }, "/") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go deleted file mode 100644 index 4e34d1a22f3a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go +++ /dev/null @@ -1,77 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - "path" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// serializeImmutableHostnameBucketMiddleware handles injecting the bucket name into -// "immutable" hostnames resolved via v1 EndpointResolvers. This CANNOT be done in -// serialization, since v2 endpoint resolution requires removing the {Bucket} path -// segment from all S3 requests. -// -// This will only be done for non-ARN buckets, as the features that use those require -// virtualhost manipulation to function and we previously (pre-ep2) expected the caller -// to handle that in their resolver. -type serializeImmutableHostnameBucketMiddleware struct { - UsePathStyle bool -} - -func (*serializeImmutableHostnameBucketMiddleware) ID() string { - return "serializeImmutableHostnameBucket" -} - -func (m *serializeImmutableHostnameBucketMiddleware) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - bucket, ok := bucketFromInput(in.Parameters) - if !ok { - return next.HandleSerialize(ctx, in) - } - - // a bucket being un-vhostable will also force us to use path style - usePathStyle := m.UsePathStyle || !awsrulesfn.IsVirtualHostableS3Bucket(bucket, request.URL.Scheme != "https") - - if !smithyhttp.GetHostnameImmutable(ctx) && - !(awsmiddleware.GetRequiresLegacyEndpoints(ctx) && usePathStyle) { - return next.HandleSerialize(ctx, in) - } - - parsedBucket := awsrulesfn.ParseARN(bucket) - - // disallow ARN buckets except for MRAP arns - if parsedBucket != nil && len(parsedBucket.Region) > 0 { - return next.HandleSerialize(ctx, in) - } - - request.URL.Path = path.Join(request.URL.Path, bucket) - request.URL.RawPath = path.Join(request.URL.RawPath, httpbinding.EscapePath(bucket, true)) - - return next.HandleSerialize(ctx, in) -} - -func addSerializeImmutableHostnameBucketMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert( - &serializeImmutableHostnameBucketMiddleware{ - UsePathStyle: options.UsePathStyle, - }, - "OperationSerializer", - middleware.Before, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go deleted file mode 100644 index 050717e85592..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go +++ /dev/null @@ -1,14698 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - smithyxml "github.com/aws/smithy-go/encoding/xml" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "strconv" - "strings" -) - -type awsRestxml_serializeOpAbortMultipartUpload struct { -} - -func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AbortMultipartUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=AbortMultipartUpload") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatchInitiatedTime != nil { - locationName := "X-Amz-If-Match-Initiated-Time" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchInitiatedTime)) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpCompleteMultipartUpload struct { -} - -func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CompleteMultipartUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.MultipartUpload != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CompleteMultipartUpload", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentCompletedMultipartUpload(input.MultipartUpload, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Checksum-Crc32c" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } - - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if len(v.ChecksumType) > 0 { - locationName := "X-Amz-Checksum-Type" - encoder.SetHeader(locationName).String(string(v.ChecksumType)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.IfNoneMatch) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.MpuObjectSize != nil { - locationName := "X-Amz-Mp-Object-Size" - encoder.SetHeader(locationName).Long(*v.MpuObjectSize) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpCopyObject struct { -} - -func (*awsRestxml_serializeOpCopyObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CopyObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CopyObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCopyObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentDisposition != nil { - locationName := "Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentType != nil { - locationName := "Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.CopySource != nil { - locationName := "X-Amz-Copy-Source" - encoder.SetHeader(locationName).String(*v.CopySource) - } - - if v.CopySourceIfMatch != nil { - locationName := "X-Amz-Copy-Source-If-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) - } - - if v.CopySourceIfModifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) - } - - if v.CopySourceIfNoneMatch != nil { - locationName := "X-Amz-Copy-Source-If-None-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) - } - - if v.CopySourceIfUnmodifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) - } - - if v.CopySourceSSECustomerAlgorithm != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) - } - - if v.CopySourceSSECustomerKey != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) - } - - if v.CopySourceSSECustomerKeyMD5 != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.ExpectedSourceBucketOwner != nil { - locationName := "X-Amz-Source-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) - } - - if v.Expires != nil { - locationName := "Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if len(v.MetadataDirective) > 0 { - locationName := "X-Amz-Metadata-Directive" - encoder.SetHeader(locationName).String(string(v.MetadataDirective)) - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := "X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.Tagging != nil { - locationName := "X-Amz-Tagging" - encoder.SetHeader(locationName).String(*v.Tagging) - } - - if len(v.TaggingDirective) > 0 { - locationName := "X-Amz-Tagging-Directive" - encoder.SetHeader(locationName).String(string(v.TaggingDirective)) - } - - if v.WebsiteRedirectLocation != nil { - locationName := "X-Amz-Website-Redirect-Location" - encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) - } - - return nil -} - -type awsRestxml_serializeOpCreateBucket struct { -} - -func (*awsRestxml_serializeOpCreateBucket) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateBucketInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateBucketInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.CreateBucketConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CreateBucketConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentCreateBucketConfiguration(input.CreateBucketConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWrite != nil { - locationName := "X-Amz-Grant-Write" - encoder.SetHeader(locationName).String(*v.GrantWrite) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.ObjectLockEnabledForBucket != nil { - locationName := "X-Amz-Bucket-Object-Lock-Enabled" - encoder.SetHeader(locationName).Boolean(*v.ObjectLockEnabledForBucket) - } - - if len(v.ObjectOwnership) > 0 { - locationName := "X-Amz-Object-Ownership" - encoder.SetHeader(locationName).String(string(v.ObjectOwnership)) - } - - return nil -} - -type awsRestxml_serializeOpCreateBucketMetadataConfiguration struct { -} - -func (*awsRestxml_serializeOpCreateBucketMetadataConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateBucketMetadataConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateBucketMetadataConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateBucketMetadataConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.MetadataConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MetadataConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentMetadataConfiguration(input.MetadataConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateBucketMetadataConfigurationInput(v *CreateBucketMetadataConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpCreateBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.MetadataTableConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MetadataTableConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentMetadataTableConfiguration(input.MetadataTableConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpCreateMultipartUpload struct { -} - -func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateMultipartUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if len(v.ChecksumType) > 0 { - locationName := "X-Amz-Checksum-Type" - encoder.SetHeader(locationName).String(string(v.ChecksumType)) - } - - if v.ContentDisposition != nil { - locationName := "Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentType != nil { - locationName := "Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Expires != nil { - locationName := "Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := "X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.Tagging != nil { - locationName := "X-Amz-Tagging" - encoder.SetHeader(locationName).String(*v.Tagging) - } - - if v.WebsiteRedirectLocation != nil { - locationName := "X-Amz-Website-Redirect-Location" - encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) - } - - return nil -} - -type awsRestxml_serializeOpCreateSession struct { -} - -func (*awsRestxml_serializeOpCreateSession) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateSessionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?session") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateSessionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if len(v.SessionMode) > 0 { - locationName := "X-Amz-Create-Session-Mode" - encoder.SetHeader(locationName).String(string(v.SessionMode)) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucket struct { -} - -func (*awsRestxml_serializeOpDeleteBucket) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?analytics") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketCors struct { -} - -func (*awsRestxml_serializeOpDeleteBucketCors) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketCorsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?cors") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketEncryption struct { -} - -func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketEncryptionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?encryption") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketInventoryConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?inventory") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketLifecycle struct { -} - -func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketLifecycleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?lifecycle") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketMetadataConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketMetadataConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketMetadataConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketMetadataConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataConfigurationInput(v *DeleteBucketMetadataConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketMetricsConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metrics") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketOwnershipControls struct { -} - -func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketPolicy struct { -} - -func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policy") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketReplication struct { -} - -func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketReplicationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?replication") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketTagging struct { -} - -func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketWebsite struct { -} - -func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketWebsiteInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?website") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteObject struct { -} - -func (*awsRestxml_serializeOpDeleteObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=DeleteObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BypassGovernanceRetention != nil { - locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfMatchLastModifiedTime != nil { - locationName := "X-Amz-If-Match-Last-Modified-Time" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchLastModifiedTime)) - } - - if v.IfMatchSize != nil { - locationName := "X-Amz-If-Match-Size" - encoder.SetHeader(locationName).Long(*v.IfMatchSize) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.MFA != nil { - locationName := "X-Amz-Mfa" - encoder.SetHeader(locationName).String(*v.MFA) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpDeleteObjects struct { -} - -func (*awsRestxml_serializeOpDeleteObjects) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteObjectsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?delete") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Delete != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Delete", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentDelete(input.Delete, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BypassGovernanceRetention != nil { - locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.MFA != nil { - locationName := "X-Amz-Mfa" - encoder.SetHeader(locationName).String(*v.MFA) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - return nil -} - -type awsRestxml_serializeOpDeleteObjectTagging struct { -} - -func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteObjectTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpDeletePublicAccessBlock struct { -} - -func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeletePublicAccessBlockInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketAccelerateConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?accelerate") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketAcl struct { -} - -func (*awsRestxml_serializeOpGetBucketAcl) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketAclInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?acl") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketAclInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=GetBucketAnalyticsConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketCors struct { -} - -func (*awsRestxml_serializeOpGetBucketCors) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketCorsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?cors") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketEncryption struct { -} - -func (*awsRestxml_serializeOpGetBucketEncryption) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketEncryptionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?encryption") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketInventoryConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=GetBucketInventoryConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketLifecycleConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?lifecycle") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketLocation struct { -} - -func (*awsRestxml_serializeOpGetBucketLocation) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketLocationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?location") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketLogging struct { -} - -func (*awsRestxml_serializeOpGetBucketLogging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketLoggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?logging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketMetadataConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketMetadataConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketMetadataConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketMetadataConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketMetadataConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketMetadataConfigurationInput(v *GetBucketMetadataConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketMetadataTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketMetricsConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=GetBucketMetricsConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketNotificationConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?notification") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketOwnershipControls struct { -} - -func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketPolicy struct { -} - -func (*awsRestxml_serializeOpGetBucketPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policy") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketPolicyStatus struct { -} - -func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketPolicyStatusInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policyStatus") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketReplication struct { -} - -func (*awsRestxml_serializeOpGetBucketReplication) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketReplicationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?replication") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketRequestPayment struct { -} - -func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketRequestPaymentInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?requestPayment") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketTagging struct { -} - -func (*awsRestxml_serializeOpGetBucketTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketVersioning struct { -} - -func (*awsRestxml_serializeOpGetBucketVersioning) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketVersioningInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?versioning") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketWebsite struct { -} - -func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketWebsiteInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?website") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetObject struct { -} - -func (*awsRestxml_serializeOpGetObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=GetObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumMode) > 0 { - locationName := "X-Amz-Checksum-Mode" - encoder.SetHeader(locationName).String(string(v.ChecksumMode)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfModifiedSince != nil { - locationName := "If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) - } - - if v.IfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.IfNoneMatch) - } - - if v.IfUnmodifiedSince != nil { - locationName := "If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.PartNumber != nil { - encoder.SetQuery("partNumber").Integer(*v.PartNumber) - } - - if v.Range != nil { - locationName := "Range" - encoder.SetHeader(locationName).String(*v.Range) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.ResponseCacheControl != nil { - encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) - } - - if v.ResponseContentDisposition != nil { - encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) - } - - if v.ResponseContentEncoding != nil { - encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) - } - - if v.ResponseContentLanguage != nil { - encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) - } - - if v.ResponseContentType != nil { - encoder.SetQuery("response-content-type").String(*v.ResponseContentType) - } - - if v.ResponseExpires != nil { - encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectAcl struct { -} - -func (*awsRestxml_serializeOpGetObjectAcl) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectAclInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectAttributes struct { -} - -func (*awsRestxml_serializeOpGetObjectAttributes) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectAttributesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?attributes") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.MaxParts != nil { - locationName := "X-Amz-Max-Parts" - encoder.SetHeader(locationName).Integer(*v.MaxParts) - } - - if v.ObjectAttributes != nil { - locationName := "X-Amz-Object-Attributes" - if len(v.ObjectAttributes) == 0 { - encoder.AddHeader(locationName).String("") - } - for i := range v.ObjectAttributes { - if len(v.ObjectAttributes[i]) > 0 { - escaped := string(v.ObjectAttributes[i]) - if strings.Index(string(v.ObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.ObjectAttributes[i]), `"`) != -1 { - escaped = strconv.Quote(string(v.ObjectAttributes[i])) - } - - encoder.AddHeader(locationName).String(string(escaped)) - } - } - } - - if v.PartNumberMarker != nil { - locationName := "X-Amz-Part-Number-Marker" - encoder.SetHeader(locationName).String(*v.PartNumberMarker) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectLegalHold struct { -} - -func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectLegalHoldInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectLockConfiguration struct { -} - -func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectLockConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?object-lock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectRetention struct { -} - -func (*awsRestxml_serializeOpGetObjectRetention) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectRetentionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectTagging struct { -} - -func (*awsRestxml_serializeOpGetObjectTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectTorrent struct { -} - -func (*awsRestxml_serializeOpGetObjectTorrent) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectTorrentInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?torrent") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - return nil -} - -type awsRestxml_serializeOpGetPublicAccessBlock struct { -} - -func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetPublicAccessBlockInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpHeadBucket struct { -} - -func (*awsRestxml_serializeOpHeadBucket) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*HeadBucketInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "HEAD" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsHeadBucketInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpHeadObject struct { -} - -func (*awsRestxml_serializeOpHeadObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*HeadObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "HEAD" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsHeadObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumMode) > 0 { - locationName := "X-Amz-Checksum-Mode" - encoder.SetHeader(locationName).String(string(v.ChecksumMode)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfModifiedSince != nil { - locationName := "If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) - } - - if v.IfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.IfNoneMatch) - } - - if v.IfUnmodifiedSince != nil { - locationName := "If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.PartNumber != nil { - encoder.SetQuery("partNumber").Integer(*v.PartNumber) - } - - if v.Range != nil { - locationName := "Range" - encoder.SetHeader(locationName).String(*v.Range) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.ResponseCacheControl != nil { - encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) - } - - if v.ResponseContentDisposition != nil { - encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) - } - - if v.ResponseContentEncoding != nil { - encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) - } - - if v.ResponseContentLanguage != nil { - encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) - } - - if v.ResponseContentType != nil { - encoder.SetQuery("response-content-type").String(*v.ResponseContentType) - } - - if v.ResponseExpires != nil { - encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpListBucketAnalyticsConfigurations struct { -} - -func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=ListBucketAnalyticsConfigurations") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ContinuationToken != nil { - encoder.SetQuery("continuation-token").String(*v.ContinuationToken) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpListBucketIntelligentTieringConfigurations struct { -} - -func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ContinuationToken != nil { - encoder.SetQuery("continuation-token").String(*v.ContinuationToken) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpListBucketInventoryConfigurations struct { -} - -func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=ListBucketInventoryConfigurations") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ContinuationToken != nil { - encoder.SetQuery("continuation-token").String(*v.ContinuationToken) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpListBucketMetricsConfigurations struct { -} - -func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=ListBucketMetricsConfigurations") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ContinuationToken != nil { - encoder.SetQuery("continuation-token").String(*v.ContinuationToken) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpListBuckets struct { -} - -func (*awsRestxml_serializeOpListBuckets) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListBucketsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?x-id=ListBuckets") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListBucketsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BucketRegion != nil { - encoder.SetQuery("bucket-region").String(*v.BucketRegion) - } - - if v.ContinuationToken != nil { - encoder.SetQuery("continuation-token").String(*v.ContinuationToken) - } - - if v.MaxBuckets != nil { - encoder.SetQuery("max-buckets").Integer(*v.MaxBuckets) - } - - if v.Prefix != nil { - encoder.SetQuery("prefix").String(*v.Prefix) - } - - return nil -} - -type awsRestxml_serializeOpListDirectoryBuckets struct { -} - -func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListDirectoryBucketsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?x-id=ListDirectoryBuckets") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ContinuationToken != nil { - encoder.SetQuery("continuation-token").String(*v.ContinuationToken) - } - - if v.MaxDirectoryBuckets != nil { - encoder.SetQuery("max-directory-buckets").Integer(*v.MaxDirectoryBuckets) - } - - return nil -} - -type awsRestxml_serializeOpListMultipartUploads struct { -} - -func (*awsRestxml_serializeOpListMultipartUploads) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListMultipartUploadsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?uploads") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.Delimiter != nil { - encoder.SetQuery("delimiter").String(*v.Delimiter) - } - - if len(v.EncodingType) > 0 { - encoder.SetQuery("encoding-type").String(string(v.EncodingType)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.KeyMarker != nil { - encoder.SetQuery("key-marker").String(*v.KeyMarker) - } - - if v.MaxUploads != nil { - encoder.SetQuery("max-uploads").Integer(*v.MaxUploads) - } - - if v.Prefix != nil { - encoder.SetQuery("prefix").String(*v.Prefix) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.UploadIdMarker != nil { - encoder.SetQuery("upload-id-marker").String(*v.UploadIdMarker) - } - - return nil -} - -type awsRestxml_serializeOpListObjects struct { -} - -func (*awsRestxml_serializeOpListObjects) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListObjectsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListObjectsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.Delimiter != nil { - encoder.SetQuery("delimiter").String(*v.Delimiter) - } - - if len(v.EncodingType) > 0 { - encoder.SetQuery("encoding-type").String(string(v.EncodingType)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Marker != nil { - encoder.SetQuery("marker").String(*v.Marker) - } - - if v.MaxKeys != nil { - encoder.SetQuery("max-keys").Integer(*v.MaxKeys) - } - - if v.OptionalObjectAttributes != nil { - locationName := "X-Amz-Optional-Object-Attributes" - if len(v.OptionalObjectAttributes) == 0 { - encoder.AddHeader(locationName).String("") - } - for i := range v.OptionalObjectAttributes { - if len(v.OptionalObjectAttributes[i]) > 0 { - escaped := string(v.OptionalObjectAttributes[i]) - if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { - escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) - } - - encoder.AddHeader(locationName).String(string(escaped)) - } - } - } - - if v.Prefix != nil { - encoder.SetQuery("prefix").String(*v.Prefix) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - return nil -} - -type awsRestxml_serializeOpListObjectsV2 struct { -} - -func (*awsRestxml_serializeOpListObjectsV2) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListObjectsV2Input) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?list-type=2") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListObjectsV2Input(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ContinuationToken != nil { - encoder.SetQuery("continuation-token").String(*v.ContinuationToken) - } - - if v.Delimiter != nil { - encoder.SetQuery("delimiter").String(*v.Delimiter) - } - - if len(v.EncodingType) > 0 { - encoder.SetQuery("encoding-type").String(string(v.EncodingType)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.FetchOwner != nil { - encoder.SetQuery("fetch-owner").Boolean(*v.FetchOwner) - } - - if v.MaxKeys != nil { - encoder.SetQuery("max-keys").Integer(*v.MaxKeys) - } - - if v.OptionalObjectAttributes != nil { - locationName := "X-Amz-Optional-Object-Attributes" - if len(v.OptionalObjectAttributes) == 0 { - encoder.AddHeader(locationName).String("") - } - for i := range v.OptionalObjectAttributes { - if len(v.OptionalObjectAttributes[i]) > 0 { - escaped := string(v.OptionalObjectAttributes[i]) - if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { - escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) - } - - encoder.AddHeader(locationName).String(string(escaped)) - } - } - } - - if v.Prefix != nil { - encoder.SetQuery("prefix").String(*v.Prefix) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.StartAfter != nil { - encoder.SetQuery("start-after").String(*v.StartAfter) - } - - return nil -} - -type awsRestxml_serializeOpListObjectVersions struct { -} - -func (*awsRestxml_serializeOpListObjectVersions) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListObjectVersionsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?versions") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.Delimiter != nil { - encoder.SetQuery("delimiter").String(*v.Delimiter) - } - - if len(v.EncodingType) > 0 { - encoder.SetQuery("encoding-type").String(string(v.EncodingType)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.KeyMarker != nil { - encoder.SetQuery("key-marker").String(*v.KeyMarker) - } - - if v.MaxKeys != nil { - encoder.SetQuery("max-keys").Integer(*v.MaxKeys) - } - - if v.OptionalObjectAttributes != nil { - locationName := "X-Amz-Optional-Object-Attributes" - if len(v.OptionalObjectAttributes) == 0 { - encoder.AddHeader(locationName).String("") - } - for i := range v.OptionalObjectAttributes { - if len(v.OptionalObjectAttributes[i]) > 0 { - escaped := string(v.OptionalObjectAttributes[i]) - if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { - escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) - } - - encoder.AddHeader(locationName).String(string(escaped)) - } - } - } - - if v.Prefix != nil { - encoder.SetQuery("prefix").String(*v.Prefix) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionIdMarker != nil { - encoder.SetQuery("version-id-marker").String(*v.VersionIdMarker) - } - - return nil -} - -type awsRestxml_serializeOpListParts struct { -} - -func (*awsRestxml_serializeOpListParts) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListPartsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=ListParts") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsListPartsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.MaxParts != nil { - encoder.SetQuery("max-parts").Integer(*v.MaxParts) - } - - if v.PartNumberMarker != nil { - encoder.SetQuery("part-number-marker").String(*v.PartNumberMarker) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketAccelerateConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?accelerate") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.AccelerateConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccelerateConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentAccelerateConfiguration(input.AccelerateConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketAcl struct { -} - -func (*awsRestxml_serializeOpPutBucketAcl) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketAclInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?acl") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketAclInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.AccessControlPolicy != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlPolicy", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWrite != nil { - locationName := "X-Amz-Grant-Write" - encoder.SetHeader(locationName).String(*v.GrantWrite) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?analytics") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.AnalyticsConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AnalyticsConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentAnalyticsConfiguration(input.AnalyticsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketCors struct { -} - -func (*awsRestxml_serializeOpPutBucketCors) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketCorsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?cors") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.CORSConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CORSConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentCORSConfiguration(input.CORSConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketEncryption struct { -} - -func (*awsRestxml_serializeOpPutBucketEncryption) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketEncryptionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?encryption") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.ServerSideEncryptionConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ServerSideEncryptionConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentServerSideEncryptionConfiguration(input.ServerSideEncryptionConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.IntelligentTieringConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "IntelligentTieringConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentIntelligentTieringConfiguration(input.IntelligentTieringConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketInventoryConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?inventory") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.InventoryConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "InventoryConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentInventoryConfiguration(input.InventoryConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketLifecycleConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?lifecycle") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.LifecycleConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "LifecycleConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentBucketLifecycleConfiguration(input.LifecycleConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if len(v.TransitionDefaultMinimumObjectSize) > 0 { - locationName := "X-Amz-Transition-Default-Minimum-Object-Size" - encoder.SetHeader(locationName).String(string(v.TransitionDefaultMinimumObjectSize)) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketLogging struct { -} - -func (*awsRestxml_serializeOpPutBucketLogging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketLoggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?logging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.BucketLoggingStatus != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BucketLoggingStatus", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentBucketLoggingStatus(input.BucketLoggingStatus, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketMetricsConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metrics") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.MetricsConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MetricsConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentMetricsConfiguration(input.MetricsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketNotificationConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?notification") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.NotificationConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NotificationConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentNotificationConfiguration(input.NotificationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.SkipDestinationValidation != nil { - locationName := "X-Amz-Skip-Destination-Validation" - encoder.SetHeader(locationName).Boolean(*v.SkipDestinationValidation) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketOwnershipControls struct { -} - -func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.OwnershipControls != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OwnershipControls", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentOwnershipControls(input.OwnershipControls, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketPolicy struct { -} - -func (*awsRestxml_serializeOpPutBucketPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policy") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("text/plain") - } - - if input.Policy != nil { - payload := strings.NewReader(*input.Policy) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ConfirmRemoveSelfBucketAccess != nil { - locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access" - encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketReplication struct { -} - -func (*awsRestxml_serializeOpPutBucketReplication) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketReplicationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?replication") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.ReplicationConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplicationConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentReplicationConfiguration(input.ReplicationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Token != nil { - locationName := "X-Amz-Bucket-Object-Lock-Token" - encoder.SetHeader(locationName).String(*v.Token) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketRequestPayment struct { -} - -func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketRequestPaymentInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?requestPayment") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.RequestPaymentConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RequestPaymentConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentRequestPaymentConfiguration(input.RequestPaymentConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketTagging struct { -} - -func (*awsRestxml_serializeOpPutBucketTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Tagging != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tagging", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketVersioning struct { -} - -func (*awsRestxml_serializeOpPutBucketVersioning) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketVersioningInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?versioning") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.VersioningConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "VersioningConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentVersioningConfiguration(input.VersioningConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.MFA != nil { - locationName := "X-Amz-Mfa" - encoder.SetHeader(locationName).String(*v.MFA) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketWebsite struct { -} - -func (*awsRestxml_serializeOpPutBucketWebsite) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketWebsiteInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?website") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.WebsiteConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "WebsiteConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentWebsiteConfiguration(input.WebsiteConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutObject struct { -} - -func (*awsRestxml_serializeOpPutObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=PutObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/octet-stream") - } - - if input.Body != nil { - payload := input.Body - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Checksum-Crc32c" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } - - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if v.ContentDisposition != nil { - locationName := "Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentLength != nil { - locationName := "Content-Length" - encoder.SetHeader(locationName).Long(*v.ContentLength) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ContentType != nil { - locationName := "Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Expires != nil { - locationName := "Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.IfNoneMatch) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := "X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.Tagging != nil { - locationName := "X-Amz-Tagging" - encoder.SetHeader(locationName).String(*v.Tagging) - } - - if v.WebsiteRedirectLocation != nil { - locationName := "X-Amz-Website-Redirect-Location" - encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) - } - - if v.WriteOffsetBytes != nil { - locationName := "X-Amz-Write-Offset-Bytes" - encoder.SetHeader(locationName).Long(*v.WriteOffsetBytes) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectAcl struct { -} - -func (*awsRestxml_serializeOpPutObjectAcl) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectAclInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectAclInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.AccessControlPolicy != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlPolicy", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWrite != nil { - locationName := "X-Amz-Grant-Write" - encoder.SetHeader(locationName).String(*v.GrantWrite) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectLegalHold struct { -} - -func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectLegalHoldInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.LegalHold != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "LegalHold", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentObjectLockLegalHold(input.LegalHold, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectLockConfiguration struct { -} - -func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectLockConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?object-lock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.ObjectLockConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectLockConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentObjectLockConfiguration(input.ObjectLockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.Token != nil { - locationName := "X-Amz-Bucket-Object-Lock-Token" - encoder.SetHeader(locationName).String(*v.Token) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectRetention struct { -} - -func (*awsRestxml_serializeOpPutObjectRetention) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectRetentionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Retention != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Retention", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentObjectLockRetention(input.Retention, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BypassGovernanceRetention != nil { - locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectTagging struct { -} - -func (*awsRestxml_serializeOpPutObjectTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Tagging != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tagging", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutPublicAccessBlock struct { -} - -func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutPublicAccessBlockInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.PublicAccessBlockConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "PublicAccessBlockConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentPublicAccessBlockConfiguration(input.PublicAccessBlockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpRenameObject struct { -} - -func (*awsRestxml_serializeOpRenameObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpRenameObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*RenameObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?renameObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsRenameObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsRenameObjectInput(v *RenameObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ClientToken != nil { - locationName := "X-Amz-Client-Token" - encoder.SetHeader(locationName).String(*v.ClientToken) - } - - if v.DestinationIfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.DestinationIfMatch) - } - - if v.DestinationIfModifiedSince != nil { - locationName := "If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.DestinationIfModifiedSince)) - } - - if v.DestinationIfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.DestinationIfNoneMatch) - } - - if v.DestinationIfUnmodifiedSince != nil { - locationName := "If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.DestinationIfUnmodifiedSince)) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.RenameSource != nil { - locationName := "X-Amz-Rename-Source" - encoder.SetHeader(locationName).String(*v.RenameSource) - } - - if v.SourceIfMatch != nil { - locationName := "X-Amz-Rename-Source-If-Match" - encoder.SetHeader(locationName).String(*v.SourceIfMatch) - } - - if v.SourceIfModifiedSince != nil { - locationName := "X-Amz-Rename-Source-If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.SourceIfModifiedSince)) - } - - if v.SourceIfNoneMatch != nil { - locationName := "X-Amz-Rename-Source-If-None-Match" - encoder.SetHeader(locationName).String(*v.SourceIfNoneMatch) - } - - if v.SourceIfUnmodifiedSince != nil { - locationName := "X-Amz-Rename-Source-If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.SourceIfUnmodifiedSince)) - } - - return nil -} - -type awsRestxml_serializeOpRestoreObject struct { -} - -func (*awsRestxml_serializeOpRestoreObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*RestoreObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsRestoreObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.RestoreRequest != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RestoreRequest", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentRestoreRequest(input.RestoreRequest, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpSelectObjectContent struct { -} - -func (*awsRestxml_serializeOpSelectObjectContent) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*SelectObjectContentInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/xml") - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SelectObjectContentRequest", - }, - Attr: rootAttr, - } - root.Attr = append(root.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeOpDocumentSelectObjectContentInput(input, xmlEncoder.RootElement(root)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - if request, err = request.SetStream(bytes.NewReader(xmlEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - return nil -} - -func awsRestxml_serializeOpDocumentSelectObjectContentInput(v *SelectObjectContentInput, value smithyxml.Value) error { - defer value.Close() - if v.Expression != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Expression", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Expression) - } - if len(v.ExpressionType) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExpressionType", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ExpressionType)) - } - if v.InputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "InputSerialization", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { - return err - } - } - if v.OutputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OutputSerialization", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { - return err - } - } - if v.RequestProgress != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RequestProgress", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRequestProgress(v.RequestProgress, el); err != nil { - return err - } - } - if v.ScanRange != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ScanRange", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentScanRange(v.ScanRange, el); err != nil { - return err - } - } - return nil -} - -type awsRestxml_serializeOpUpdateBucketMetadataInventoryTableConfiguration struct { -} - -func (*awsRestxml_serializeOpUpdateBucketMetadataInventoryTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpUpdateBucketMetadataInventoryTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UpdateBucketMetadataInventoryTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataInventoryTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsUpdateBucketMetadataInventoryTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.InventoryTableConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "InventoryTableConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentInventoryTableConfigurationUpdates(input.InventoryTableConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsUpdateBucketMetadataInventoryTableConfigurationInput(v *UpdateBucketMetadataInventoryTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpUpdateBucketMetadataJournalTableConfiguration struct { -} - -func (*awsRestxml_serializeOpUpdateBucketMetadataJournalTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpUpdateBucketMetadataJournalTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UpdateBucketMetadataJournalTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataJournalTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsUpdateBucketMetadataJournalTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.JournalTableConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "JournalTableConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentJournalTableConfigurationUpdates(input.JournalTableConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsUpdateBucketMetadataJournalTableConfigurationInput(v *UpdateBucketMetadataJournalTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpUploadPart struct { -} - -func (*awsRestxml_serializeOpUploadPart) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UploadPartInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPart") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsUploadPartInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/octet-stream") - } - - if input.Body != nil { - payload := input.Body - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Checksum-Crc32c" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } - - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if v.ContentLength != nil { - locationName := "Content-Length" - encoder.SetHeader(locationName).Long(*v.ContentLength) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.PartNumber != nil { - encoder.SetQuery("partNumber").Integer(*v.PartNumber) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpUploadPartCopy struct { -} - -func (*awsRestxml_serializeOpUploadPartCopy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UploadPartCopyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPartCopy") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.CopySource != nil { - locationName := "X-Amz-Copy-Source" - encoder.SetHeader(locationName).String(*v.CopySource) - } - - if v.CopySourceIfMatch != nil { - locationName := "X-Amz-Copy-Source-If-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) - } - - if v.CopySourceIfModifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) - } - - if v.CopySourceIfNoneMatch != nil { - locationName := "X-Amz-Copy-Source-If-None-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) - } - - if v.CopySourceIfUnmodifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) - } - - if v.CopySourceRange != nil { - locationName := "X-Amz-Copy-Source-Range" - encoder.SetHeader(locationName).String(*v.CopySourceRange) - } - - if v.CopySourceSSECustomerAlgorithm != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) - } - - if v.CopySourceSSECustomerKey != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) - } - - if v.CopySourceSSECustomerKeyMD5 != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.ExpectedSourceBucketOwner != nil { - locationName := "X-Amz-Source-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.PartNumber != nil { - encoder.SetQuery("partNumber").Integer(*v.PartNumber) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpWriteGetObjectResponse struct { -} - -func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*WriteGetObjectResponseInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/octet-stream") - } - - if input.Body != nil { - payload := input.Body - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AcceptRanges != nil { - locationName := "X-Amz-Fwd-Header-Accept-Ranges" - encoder.SetHeader(locationName).String(*v.AcceptRanges) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "X-Amz-Fwd-Header-Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } - - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if v.ContentDisposition != nil { - locationName := "X-Amz-Fwd-Header-Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "X-Amz-Fwd-Header-Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "X-Amz-Fwd-Header-Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentLength != nil { - locationName := "Content-Length" - encoder.SetHeader(locationName).Long(*v.ContentLength) - } - - if v.ContentRange != nil { - locationName := "X-Amz-Fwd-Header-Content-Range" - encoder.SetHeader(locationName).String(*v.ContentRange) - } - - if v.ContentType != nil { - locationName := "X-Amz-Fwd-Header-Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.DeleteMarker != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker" - encoder.SetHeader(locationName).Boolean(*v.DeleteMarker) - } - - if v.ErrorCode != nil { - locationName := "X-Amz-Fwd-Error-Code" - encoder.SetHeader(locationName).String(*v.ErrorCode) - } - - if v.ErrorMessage != nil { - locationName := "X-Amz-Fwd-Error-Message" - encoder.SetHeader(locationName).String(*v.ErrorMessage) - } - - if v.ETag != nil { - locationName := "X-Amz-Fwd-Header-Etag" - encoder.SetHeader(locationName).String(*v.ETag) - } - - if v.Expiration != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Expiration" - encoder.SetHeader(locationName).String(*v.Expiration) - } - - if v.Expires != nil { - locationName := "X-Amz-Fwd-Header-Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.LastModified != nil { - locationName := "X-Amz-Fwd-Header-Last-Modified" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.LastModified)) - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if v.MissingMeta != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta" - encoder.SetHeader(locationName).Integer(*v.MissingMeta) - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if v.PartsCount != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count" - encoder.SetHeader(locationName).Integer(*v.PartsCount) - } - - if len(v.ReplicationStatus) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Replication-Status" - encoder.SetHeader(locationName).String(string(v.ReplicationStatus)) - } - - if len(v.RequestCharged) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Request-Charged" - encoder.SetHeader(locationName).String(string(v.RequestCharged)) - } - - if v.RequestRoute != nil { - locationName := "X-Amz-Request-Route" - encoder.SetHeader(locationName).String(*v.RequestRoute) - } - - if v.RequestToken != nil { - locationName := "X-Amz-Request-Token" - encoder.SetHeader(locationName).String(*v.RequestToken) - } - - if v.Restore != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Restore" - encoder.SetHeader(locationName).String(*v.Restore) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if v.StatusCode != nil { - locationName := "X-Amz-Fwd-Status" - encoder.SetHeader(locationName).Integer(*v.StatusCode) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.TagCount != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count" - encoder.SetHeader(locationName).Integer(*v.TagCount) - } - - if v.VersionId != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id" - encoder.SetHeader(locationName).String(*v.VersionId) - } - - return nil -} - -func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error { - defer value.Close() - if v.DaysAfterInitiation != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DaysAfterInitiation", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.DaysAfterInitiation) - } - return nil -} - -func awsRestxml_serializeDocumentAccelerateConfiguration(v *types.AccelerateConfiguration, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentAccessControlPolicy(v *types.AccessControlPolicy, value smithyxml.Value) error { - defer value.Close() - if v.Grants != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlList", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGrants(v.Grants, el); err != nil { - return err - } - } - if v.Owner != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Owner", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentOwner(v.Owner, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAccessControlTranslation(v *types.AccessControlTranslation, value smithyxml.Value) error { - defer value.Close() - if len(v.Owner) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Owner", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Owner)) - } - return nil -} - -func awsRestxml_serializeDocumentAllowedHeaders(v []string, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(v[i]) - } - return nil -} - -func awsRestxml_serializeDocumentAllowedMethods(v []string, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(v[i]) - } - return nil -} - -func awsRestxml_serializeDocumentAllowedOrigins(v []string, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(v[i]) - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsAndOperator(v *types.AnalyticsAndOperator, value smithyxml.Value) error { - defer value.Close() - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tags != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsConfiguration(v *types.AnalyticsConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAnalyticsFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if v.StorageClassAnalysis != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClassAnalysis", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentStorageClassAnalysis(v.StorageClassAnalysis, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsExportDestination(v *types.AnalyticsExportDestination, value smithyxml.Value) error { - defer value.Close() - if v.S3BucketDestination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "S3BucketDestination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v.S3BucketDestination, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsFilter(v types.AnalyticsFilter, value smithyxml.Value) error { - defer value.Close() - switch uv := v.(type) { - case *types.AnalyticsFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "And", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentAnalyticsAndOperator(&uv.Value, av); err != nil { - return err - } - - case *types.AnalyticsFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.AnalyticsFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { - return err - } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination, value smithyxml.Value) error { - defer value.Close() - if v.Bucket != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Bucket", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Bucket) - } - if v.BucketAccountId != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BucketAccountId", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.BucketAccountId) - } - if len(v.Format) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Format", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Format)) - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - return nil -} - -func awsRestxml_serializeDocumentBucketInfo(v *types.BucketInfo, value smithyxml.Value) error { - defer value.Close() - if len(v.DataRedundancy) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DataRedundancy", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.DataRedundancy)) - } - if len(v.Type) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Type", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Type)) - } - return nil -} - -func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Rules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Rule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentLifecycleRules(v.Rules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentBucketLoggingStatus(v *types.BucketLoggingStatus, value smithyxml.Value) error { - defer value.Close() - if v.LoggingEnabled != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "LoggingEnabled", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentLoggingEnabled(v.LoggingEnabled, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCompletedMultipartUpload(v *types.CompletedMultipartUpload, value smithyxml.Value) error { - defer value.Close() - if v.Parts != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Part", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentCompletedPartList(v.Parts, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smithyxml.Value) error { - defer value.Close() - if v.ChecksumCRC32 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumCRC32", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumCRC32) - } - if v.ChecksumCRC32C != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumCRC32C", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumCRC32C) - } - if v.ChecksumCRC64NVME != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumCRC64NVME", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumCRC64NVME) - } - if v.ChecksumSHA1 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumSHA1", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumSHA1) - } - if v.ChecksumSHA256 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumSHA256", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumSHA256) - } - if v.ETag != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ETag", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ETag) - } - if v.PartNumber != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "PartNumber", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.PartNumber) - } - return nil -} - -func awsRestxml_serializeDocumentCompletedPartList(v []types.CompletedPart, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentCompletedPart(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCondition(v *types.Condition, value smithyxml.Value) error { - defer value.Close() - if v.HttpErrorCodeReturnedEquals != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "HttpErrorCodeReturnedEquals", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.HttpErrorCodeReturnedEquals) - } - if v.KeyPrefixEquals != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KeyPrefixEquals", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KeyPrefixEquals) - } - return nil -} - -func awsRestxml_serializeDocumentCORSConfiguration(v *types.CORSConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.CORSRules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CORSRule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentCORSRules(v.CORSRules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Value) error { - defer value.Close() - if v.AllowedHeaders != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AllowedHeader", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentAllowedHeaders(v.AllowedHeaders, el); err != nil { - return err - } - } - if v.AllowedMethods != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AllowedMethod", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentAllowedMethods(v.AllowedMethods, el); err != nil { - return err - } - } - if v.AllowedOrigins != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AllowedOrigin", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentAllowedOrigins(v.AllowedOrigins, el); err != nil { - return err - } - } - if v.ExposeHeaders != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExposeHeader", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentExposeHeaders(v.ExposeHeaders, el); err != nil { - return err - } - } - if v.ID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ID) - } - if v.MaxAgeSeconds != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MaxAgeSeconds", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.MaxAgeSeconds) - } - return nil -} - -func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentCORSRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Bucket != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Bucket", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentBucketInfo(v.Bucket, el); err != nil { - return err - } - } - if v.Location != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Location", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentLocationInfo(v.Location, el); err != nil { - return err - } - } - if len(v.LocationConstraint) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "LocationConstraint", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.LocationConstraint)) - } - if v.Tags != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tags", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error { - defer value.Close() - if v.AllowQuotedRecordDelimiter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AllowQuotedRecordDelimiter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.AllowQuotedRecordDelimiter) - } - if v.Comments != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Comments", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Comments) - } - if v.FieldDelimiter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "FieldDelimiter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.FieldDelimiter) - } - if len(v.FileHeaderInfo) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "FileHeaderInfo", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.FileHeaderInfo)) - } - if v.QuoteCharacter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "QuoteCharacter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.QuoteCharacter) - } - if v.QuoteEscapeCharacter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "QuoteEscapeCharacter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.QuoteEscapeCharacter) - } - if v.RecordDelimiter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RecordDelimiter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.RecordDelimiter) - } - return nil -} - -func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.Value) error { - defer value.Close() - if v.FieldDelimiter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "FieldDelimiter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.FieldDelimiter) - } - if v.QuoteCharacter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "QuoteCharacter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.QuoteCharacter) - } - if v.QuoteEscapeCharacter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "QuoteEscapeCharacter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.QuoteEscapeCharacter) - } - if len(v.QuoteFields) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "QuoteFields", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.QuoteFields)) - } - if v.RecordDelimiter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RecordDelimiter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.RecordDelimiter) - } - return nil -} - -func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error { - defer value.Close() - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - if len(v.Mode) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Mode", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Mode)) - } - if v.Years != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Years", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Years) - } - return nil -} - -func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) error { - defer value.Close() - if v.Objects != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Object", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentObjectIdentifierList(v.Objects, el); err != nil { - return err - } - } - if v.Quiet != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Quiet", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.Quiet) - } - return nil -} - -func awsRestxml_serializeDocumentDeleteMarkerReplication(v *types.DeleteMarkerReplication, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentDestination(v *types.Destination, value smithyxml.Value) error { - defer value.Close() - if v.AccessControlTranslation != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlTranslation", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAccessControlTranslation(v.AccessControlTranslation, el); err != nil { - return err - } - } - if v.Account != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Account", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Account) - } - if v.Bucket != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Bucket", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Bucket) - } - if v.EncryptionConfiguration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EncryptionConfiguration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil { - return err - } - } - if v.Metrics != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Metrics", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentMetrics(v.Metrics, el); err != nil { - return err - } - } - if v.ReplicationTime != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplicationTime", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentReplicationTime(v.ReplicationTime, el); err != nil { - return err - } - } - if len(v.StorageClass) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClass", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.StorageClass)) - } - return nil -} - -func awsRestxml_serializeDocumentEncryption(v *types.Encryption, value smithyxml.Value) error { - defer value.Close() - if len(v.EncryptionType) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EncryptionType", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.EncryptionType)) - } - if v.KMSContext != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KMSContext", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KMSContext) - } - if v.KMSKeyId != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KMSKeyId", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KMSKeyId) - } - return nil -} - -func awsRestxml_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.ReplicaKmsKeyID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplicaKmsKeyID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ReplicaKmsKeyID) - } - return nil -} - -func awsRestxml_serializeDocumentErrorDocument(v *types.ErrorDocument, value smithyxml.Value) error { - defer value.Close() - if v.Key != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Key", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Key) - } - return nil -} - -func awsRestxml_serializeDocumentEventBridgeConfiguration(v *types.EventBridgeConfiguration, value smithyxml.Value) error { - defer value.Close() - return nil -} - -func awsRestxml_serializeDocumentEventList(v []types.Event, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(string(v[i])) - } - return nil -} - -func awsRestxml_serializeDocumentExistingObjectReplication(v *types.ExistingObjectReplication, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentExposeHeaders(v []string, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(v[i]) - } - return nil -} - -func awsRestxml_serializeDocumentFilterRule(v *types.FilterRule, value smithyxml.Value) error { - defer value.Close() - if len(v.Name) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Name", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Name)) - } - if v.Value != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Value", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Value) - } - return nil -} - -func awsRestxml_serializeDocumentFilterRuleList(v []types.FilterRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentFilterRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentGlacierJobParameters(v *types.GlacierJobParameters, value smithyxml.Value) error { - defer value.Close() - if len(v.Tier) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tier", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Tier)) - } - return nil -} - -func awsRestxml_serializeDocumentGrant(v *types.Grant, value smithyxml.Value) error { - defer value.Close() - if v.Grantee != nil { - rootAttr := []smithyxml.Attr{} - rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) - if len(v.Grantee.Type) > 0 { - var av string - av = string(v.Grantee.Type) - rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) - } - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Grantee", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { - return err - } - } - if len(v.Permission) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Permission", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Permission)) - } - return nil -} - -func awsRestxml_serializeDocumentGrantee(v *types.Grantee, value smithyxml.Value) error { - defer value.Close() - if v.DisplayName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DisplayName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.DisplayName) - } - if v.EmailAddress != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EmailAddress", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.EmailAddress) - } - if v.ID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ID) - } - if v.URI != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "URI", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.URI) - } - return nil -} - -func awsRestxml_serializeDocumentGrants(v []types.Grant, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Grant", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentGrant(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentIndexDocument(v *types.IndexDocument, value smithyxml.Value) error { - defer value.Close() - if v.Suffix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Suffix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Suffix) - } - return nil -} - -func awsRestxml_serializeDocumentInputSerialization(v *types.InputSerialization, value smithyxml.Value) error { - defer value.Close() - if len(v.CompressionType) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CompressionType", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.CompressionType)) - } - if v.CSV != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CSV", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentCSVInput(v.CSV, el); err != nil { - return err - } - } - if v.JSON != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "JSON", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentJSONInput(v.JSON, el); err != nil { - return err - } - } - if v.Parquet != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Parquet", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentParquetInput(v.Parquet, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator, value smithyxml.Value) error { - defer value.Close() - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tags != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentIntelligentTieringFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - if v.Tierings != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tiering", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTieringList(v.Tierings, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentIntelligentTieringFilter(v *types.IntelligentTieringFilter, value smithyxml.Value) error { - defer value.Close() - if v.And != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "And", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentIntelligentTieringAndOperator(v.And, el); err != nil { - return err - } - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tag != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Destination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Destination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInventoryDestination(v.Destination, el); err != nil { - return err - } - } - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInventoryFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if len(v.IncludedObjectVersions) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "IncludedObjectVersions", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.IncludedObjectVersions)) - } - if v.IsEnabled != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "IsEnabled", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.IsEnabled) - } - if v.OptionalFields != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OptionalFields", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInventoryOptionalFields(v.OptionalFields, el); err != nil { - return err - } - } - if v.Schedule != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Schedule", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInventorySchedule(v.Schedule, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentInventoryDestination(v *types.InventoryDestination, value smithyxml.Value) error { - defer value.Close() - if v.S3BucketDestination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "S3BucketDestination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInventoryS3BucketDestination(v.S3BucketDestination, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentInventoryEncryption(v *types.InventoryEncryption, value smithyxml.Value) error { - defer value.Close() - if v.SSEKMS != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SSE-KMS", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSSEKMS(v.SSEKMS, el); err != nil { - return err - } - } - if v.SSES3 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SSE-S3", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSSES3(v.SSES3, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentInventoryFilter(v *types.InventoryFilter, value smithyxml.Value) error { - defer value.Close() - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - return nil -} - -func awsRestxml_serializeDocumentInventoryOptionalFields(v []types.InventoryOptionalField, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Field", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - am.String(string(v[i])) - } - return nil -} - -func awsRestxml_serializeDocumentInventoryS3BucketDestination(v *types.InventoryS3BucketDestination, value smithyxml.Value) error { - defer value.Close() - if v.AccountId != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccountId", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.AccountId) - } - if v.Bucket != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Bucket", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Bucket) - } - if v.Encryption != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Encryption", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInventoryEncryption(v.Encryption, el); err != nil { - return err - } - } - if len(v.Format) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Format", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Format)) - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - return nil -} - -func awsRestxml_serializeDocumentInventorySchedule(v *types.InventorySchedule, value smithyxml.Value) error { - defer value.Close() - if len(v.Frequency) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Frequency", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Frequency)) - } - return nil -} - -func awsRestxml_serializeDocumentInventoryTableConfiguration(v *types.InventoryTableConfiguration, value smithyxml.Value) error { - defer value.Close() - if len(v.ConfigurationState) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ConfigurationState", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ConfigurationState)) - } - if v.EncryptionConfiguration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EncryptionConfiguration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentMetadataTableEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentInventoryTableConfigurationUpdates(v *types.InventoryTableConfigurationUpdates, value smithyxml.Value) error { - defer value.Close() - if len(v.ConfigurationState) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ConfigurationState", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ConfigurationState)) - } - if v.EncryptionConfiguration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EncryptionConfiguration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentMetadataTableEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentJournalTableConfiguration(v *types.JournalTableConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.EncryptionConfiguration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EncryptionConfiguration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentMetadataTableEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil { - return err - } - } - if v.RecordExpiration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RecordExpiration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRecordExpiration(v.RecordExpiration, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentJournalTableConfigurationUpdates(v *types.JournalTableConfigurationUpdates, value smithyxml.Value) error { - defer value.Close() - if v.RecordExpiration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RecordExpiration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRecordExpiration(v.RecordExpiration, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentJSONInput(v *types.JSONInput, value smithyxml.Value) error { - defer value.Close() - if len(v.Type) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Type", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Type)) - } - return nil -} - -func awsRestxml_serializeDocumentJSONOutput(v *types.JSONOutput, value smithyxml.Value) error { - defer value.Close() - if v.RecordDelimiter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RecordDelimiter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.RecordDelimiter) - } - return nil -} - -func awsRestxml_serializeDocumentLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Events != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Event", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { - return err - } - } - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if v.LambdaFunctionArn != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CloudFunction", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.LambdaFunctionArn) - } - return nil -} - -func awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentLambdaFunctionConfiguration(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiration, value smithyxml.Value) error { - defer value.Close() - if v.Date != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Date", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(smithytime.FormatDateTime(*v.Date)) - } - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - if v.ExpiredObjectDeleteMarker != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExpiredObjectDeleteMarker", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.ExpiredObjectDeleteMarker) - } - return nil -} - -func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smithyxml.Value) error { - defer value.Close() - if v.AbortIncompleteMultipartUpload != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AbortIncompleteMultipartUpload", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v.AbortIncompleteMultipartUpload, el); err != nil { - return err - } - } - if v.Expiration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Expiration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentLifecycleExpiration(v.Expiration, el); err != nil { - return err - } - } - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentLifecycleRuleFilter(v.Filter, el); err != nil { - return err - } - } - if v.ID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ID) - } - if v.NoncurrentVersionExpiration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NoncurrentVersionExpiration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentNoncurrentVersionExpiration(v.NoncurrentVersionExpiration, el); err != nil { - return err - } - } - if v.NoncurrentVersionTransitions != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NoncurrentVersionTransition", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v.NoncurrentVersionTransitions, el); err != nil { - return err - } - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - if v.Transitions != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Transition", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTransitionList(v.Transitions, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error { - defer value.Close() - if v.ObjectSizeGreaterThan != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectSizeGreaterThan", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.ObjectSizeGreaterThan) - } - if v.ObjectSizeLessThan != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectSizeLessThan", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.ObjectSizeLessThan) - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tags != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, value smithyxml.Value) error { - defer value.Close() - if v.And != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "And", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(v.And, el); err != nil { - return err - } - } - if v.ObjectSizeGreaterThan != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectSizeGreaterThan", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.ObjectSizeGreaterThan) - } - if v.ObjectSizeLessThan != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectSizeLessThan", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.ObjectSizeLessThan) - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tag != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentLifecycleRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentLocationInfo(v *types.LocationInfo, value smithyxml.Value) error { - defer value.Close() - if v.Name != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Name", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Name) - } - if len(v.Type) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Type", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Type)) - } - return nil -} - -func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error { - defer value.Close() - if v.TargetBucket != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TargetBucket", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TargetBucket) - } - if v.TargetGrants != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TargetGrants", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTargetGrants(v.TargetGrants, el); err != nil { - return err - } - } - if v.TargetObjectKeyFormat != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TargetObjectKeyFormat", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTargetObjectKeyFormat(v.TargetObjectKeyFormat, el); err != nil { - return err - } - } - if v.TargetPrefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TargetPrefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TargetPrefix) - } - return nil -} - -func awsRestxml_serializeDocumentMetadataConfiguration(v *types.MetadataConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.InventoryTableConfiguration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "InventoryTableConfiguration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInventoryTableConfiguration(v.InventoryTableConfiguration, el); err != nil { - return err - } - } - if v.JournalTableConfiguration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "JournalTableConfiguration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentJournalTableConfiguration(v.JournalTableConfiguration, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smithyxml.Value) error { - defer value.Close() - if v.Name != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Name", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Name) - } - if v.Value != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Value", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Value) - } - return nil -} - -func awsRestxml_serializeDocumentMetadataTableConfiguration(v *types.MetadataTableConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.S3TablesDestination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "S3TablesDestination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentS3TablesDestination(v.S3TablesDestination, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentMetadataTableEncryptionConfiguration(v *types.MetadataTableEncryptionConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.KmsKeyArn != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KmsKeyArn", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KmsKeyArn) - } - if len(v.SseAlgorithm) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SseAlgorithm", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.SseAlgorithm)) - } - return nil -} - -func awsRestxml_serializeDocumentMetrics(v *types.Metrics, value smithyxml.Value) error { - defer value.Close() - if v.EventThreshold != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EventThreshold", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentReplicationTimeValue(v.EventThreshold, el); err != nil { - return err - } - } - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentMetricsAndOperator(v *types.MetricsAndOperator, value smithyxml.Value) error { - defer value.Close() - if v.AccessPointArn != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessPointArn", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.AccessPointArn) - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tags != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentMetricsConfiguration(v *types.MetricsConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentMetricsFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - return nil -} - -func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smithyxml.Value) error { - defer value.Close() - switch uv := v.(type) { - case *types.MetricsFilterMemberAccessPointArn: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessPointArn", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.MetricsFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "And", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentMetricsAndOperator(&uv.Value, av); err != nil { - return err - } - - case *types.MetricsFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.MetricsFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { - return err - } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - - } - return nil -} - -func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error { - defer value.Close() - if v.NewerNoncurrentVersions != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NewerNoncurrentVersions", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.NewerNoncurrentVersions) - } - if v.NoncurrentDays != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NoncurrentDays", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.NoncurrentDays) - } - return nil -} - -func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error { - defer value.Close() - if v.NewerNoncurrentVersions != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NewerNoncurrentVersions", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.NewerNoncurrentVersions) - } - if v.NoncurrentDays != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NoncurrentDays", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.NoncurrentDays) - } - if len(v.StorageClass) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClass", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.StorageClass)) - } - return nil -} - -func awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v []types.NoncurrentVersionTransition, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentNoncurrentVersionTransition(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentNotificationConfiguration(v *types.NotificationConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.EventBridgeConfiguration != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "EventBridgeConfiguration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentEventBridgeConfiguration(v.EventBridgeConfiguration, el); err != nil { - return err - } - } - if v.LambdaFunctionConfigurations != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CloudFunctionConfiguration", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations, el); err != nil { - return err - } - } - if v.QueueConfigurations != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "QueueConfiguration", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentQueueConfigurationList(v.QueueConfigurations, el); err != nil { - return err - } - } - if v.TopicConfigurations != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TopicConfiguration", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTopicConfigurationList(v.TopicConfigurations, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.NotificationConfigurationFilter, value smithyxml.Value) error { - defer value.Close() - if v.Key != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "S3Key", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentS3KeyFilter(v.Key, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error { - defer value.Close() - if v.ETag != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ETag", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ETag) - } - if v.Key != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Key", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Key) - } - if v.LastModifiedTime != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "LastModifiedTime", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(smithytime.FormatHTTPDate(*v.LastModifiedTime)) - } - if v.Size != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Size", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.Size) - } - if v.VersionId != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "VersionId", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.VersionId) - } - return nil -} - -func awsRestxml_serializeDocumentObjectIdentifierList(v []types.ObjectIdentifier, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentObjectIdentifier(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentObjectLockConfiguration(v *types.ObjectLockConfiguration, value smithyxml.Value) error { - defer value.Close() - if len(v.ObjectLockEnabled) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectLockEnabled", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ObjectLockEnabled)) - } - if v.Rule != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Rule", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentObjectLockRule(v.Rule, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentObjectLockLegalHold(v *types.ObjectLockLegalHold, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentObjectLockRetention(v *types.ObjectLockRetention, value smithyxml.Value) error { - defer value.Close() - if len(v.Mode) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Mode", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Mode)) - } - if v.RetainUntilDate != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RetainUntilDate", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(smithytime.FormatDateTime(*v.RetainUntilDate)) - } - return nil -} - -func awsRestxml_serializeDocumentObjectLockRule(v *types.ObjectLockRule, value smithyxml.Value) error { - defer value.Close() - if v.DefaultRetention != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DefaultRetention", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentDefaultRetention(v.DefaultRetention, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentOutputLocation(v *types.OutputLocation, value smithyxml.Value) error { - defer value.Close() - if v.S3 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "S3", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentS3Location(v.S3, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentOutputSerialization(v *types.OutputSerialization, value smithyxml.Value) error { - defer value.Close() - if v.CSV != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CSV", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentCSVOutput(v.CSV, el); err != nil { - return err - } - } - if v.JSON != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "JSON", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentJSONOutput(v.JSON, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentOwner(v *types.Owner, value smithyxml.Value) error { - defer value.Close() - if v.DisplayName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DisplayName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.DisplayName) - } - if v.ID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ID) - } - return nil -} - -func awsRestxml_serializeDocumentOwnershipControls(v *types.OwnershipControls, value smithyxml.Value) error { - defer value.Close() - if v.Rules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Rule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentOwnershipControlsRules(v.Rules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentOwnershipControlsRule(v *types.OwnershipControlsRule, value smithyxml.Value) error { - defer value.Close() - if len(v.ObjectOwnership) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectOwnership", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ObjectOwnership)) - } - return nil -} - -func awsRestxml_serializeDocumentOwnershipControlsRules(v []types.OwnershipControlsRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentOwnershipControlsRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smithyxml.Value) error { - defer value.Close() - return nil -} - -func awsRestxml_serializeDocumentPartitionedPrefix(v *types.PartitionedPrefix, value smithyxml.Value) error { - defer value.Close() - if len(v.PartitionDateSource) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "PartitionDateSource", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.PartitionDateSource)) - } - return nil -} - -func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.BlockPublicAcls != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BlockPublicAcls", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.BlockPublicAcls) - } - if v.BlockPublicPolicy != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BlockPublicPolicy", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.BlockPublicPolicy) - } - if v.IgnorePublicAcls != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "IgnorePublicAcls", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.IgnorePublicAcls) - } - if v.RestrictPublicBuckets != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RestrictPublicBuckets", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.RestrictPublicBuckets) - } - return nil -} - -func awsRestxml_serializeDocumentQueueConfiguration(v *types.QueueConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Events != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Event", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { - return err - } - } - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if v.QueueArn != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Queue", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.QueueArn) - } - return nil -} - -func awsRestxml_serializeDocumentQueueConfigurationList(v []types.QueueConfiguration, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentQueueConfiguration(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentRecordExpiration(v *types.RecordExpiration, value smithyxml.Value) error { - defer value.Close() - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - if len(v.Expiration) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Expiration", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Expiration)) - } - return nil -} - -func awsRestxml_serializeDocumentRedirect(v *types.Redirect, value smithyxml.Value) error { - defer value.Close() - if v.HostName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "HostName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.HostName) - } - if v.HttpRedirectCode != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "HttpRedirectCode", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.HttpRedirectCode) - } - if len(v.Protocol) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Protocol", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Protocol)) - } - if v.ReplaceKeyPrefixWith != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplaceKeyPrefixWith", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ReplaceKeyPrefixWith) - } - if v.ReplaceKeyWith != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplaceKeyWith", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ReplaceKeyWith) - } - return nil -} - -func awsRestxml_serializeDocumentRedirectAllRequestsTo(v *types.RedirectAllRequestsTo, value smithyxml.Value) error { - defer value.Close() - if v.HostName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "HostName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.HostName) - } - if len(v.Protocol) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Protocol", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Protocol)) - } - return nil -} - -func awsRestxml_serializeDocumentReplicaModifications(v *types.ReplicaModifications, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Role != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Role", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Role) - } - if v.Rules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Rule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentReplicationRules(v.Rules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyxml.Value) error { - defer value.Close() - if v.DeleteMarkerReplication != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DeleteMarkerReplication", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentDeleteMarkerReplication(v.DeleteMarkerReplication, el); err != nil { - return err - } - } - if v.Destination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Destination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentDestination(v.Destination, el); err != nil { - return err - } - } - if v.ExistingObjectReplication != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExistingObjectReplication", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentExistingObjectReplication(v.ExistingObjectReplication, el); err != nil { - return err - } - } - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentReplicationRuleFilter(v.Filter, el); err != nil { - return err - } - } - if v.ID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ID) - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Priority != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Priority", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Priority) - } - if v.SourceSelectionCriteria != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SourceSelectionCriteria", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSourceSelectionCriteria(v.SourceSelectionCriteria, el); err != nil { - return err - } - } - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator, value smithyxml.Value) error { - defer value.Close() - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tags != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, value smithyxml.Value) error { - defer value.Close() - if v.And != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "And", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(v.And, el); err != nil { - return err - } - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tag != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentReplicationRules(v []types.ReplicationRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentReplicationRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - if v.Time != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Time", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentReplicationTimeValue(v.Time, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error { - defer value.Close() - if v.Minutes != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Minutes", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Minutes) - } - return nil -} - -func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPaymentConfiguration, value smithyxml.Value) error { - defer value.Close() - if len(v.Payer) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Payer", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Payer)) - } - return nil -} - -func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value smithyxml.Value) error { - defer value.Close() - if v.Enabled != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Enabled", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.Enabled) - } - return nil -} - -func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value smithyxml.Value) error { - defer value.Close() - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - if v.Description != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Description", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Description) - } - if v.GlacierJobParameters != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "GlacierJobParameters", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGlacierJobParameters(v.GlacierJobParameters, el); err != nil { - return err - } - } - if v.OutputLocation != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OutputLocation", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentOutputLocation(v.OutputLocation, el); err != nil { - return err - } - } - if v.SelectParameters != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SelectParameters", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSelectParameters(v.SelectParameters, el); err != nil { - return err - } - } - if len(v.Tier) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tier", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Tier)) - } - if len(v.Type) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Type", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Type)) - } - return nil -} - -func awsRestxml_serializeDocumentRoutingRule(v *types.RoutingRule, value smithyxml.Value) error { - defer value.Close() - if v.Condition != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Condition", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentCondition(v.Condition, el); err != nil { - return err - } - } - if v.Redirect != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Redirect", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRedirect(v.Redirect, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentRoutingRules(v []types.RoutingRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RoutingRule", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentRoutingRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentS3KeyFilter(v *types.S3KeyFilter, value smithyxml.Value) error { - defer value.Close() - if v.FilterRules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "FilterRule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentFilterRuleList(v.FilterRules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml.Value) error { - defer value.Close() - if v.AccessControlList != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlList", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGrants(v.AccessControlList, el); err != nil { - return err - } - } - if v.BucketName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BucketName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.BucketName) - } - if len(v.CannedACL) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CannedACL", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.CannedACL)) - } - if v.Encryption != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Encryption", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentEncryption(v.Encryption, el); err != nil { - return err - } - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if len(v.StorageClass) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClass", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.StorageClass)) - } - if v.Tagging != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tagging", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTagging(v.Tagging, el); err != nil { - return err - } - } - if v.UserMetadata != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "UserMetadata", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentUserMetadata(v.UserMetadata, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentS3TablesDestination(v *types.S3TablesDestination, value smithyxml.Value) error { - defer value.Close() - if v.TableBucketArn != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TableBucketArn", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TableBucketArn) - } - if v.TableName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TableName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TableName) - } - return nil -} - -func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { - defer value.Close() - if v.End != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "End", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.End) - } - if v.Start != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Start", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.Start) - } - return nil -} - -func awsRestxml_serializeDocumentSelectParameters(v *types.SelectParameters, value smithyxml.Value) error { - defer value.Close() - if v.Expression != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Expression", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Expression) - } - if len(v.ExpressionType) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExpressionType", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ExpressionType)) - } - if v.InputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "InputSerialization", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { - return err - } - } - if v.OutputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OutputSerialization", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault, value smithyxml.Value) error { - defer value.Close() - if v.KMSMasterKeyID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KMSMasterKeyID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KMSMasterKeyID) - } - if len(v.SSEAlgorithm) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SSEAlgorithm", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.SSEAlgorithm)) - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Rules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Rule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentServerSideEncryptionRules(v.Rules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEncryptionRule, value smithyxml.Value) error { - defer value.Close() - if v.ApplyServerSideEncryptionByDefault != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ApplyServerSideEncryptionByDefault", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault, el); err != nil { - return err - } - } - if v.BucketKeyEnabled != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BucketKeyEnabled", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.BucketKeyEnabled) - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideEncryptionRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentServerSideEncryptionRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentSimplePrefix(v *types.SimplePrefix, value smithyxml.Value) error { - defer value.Close() - return nil -} - -func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error { - defer value.Close() - if v.ReplicaModifications != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplicaModifications", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentReplicaModifications(v.ReplicaModifications, el); err != nil { - return err - } - } - if v.SseKmsEncryptedObjects != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SseKmsEncryptedObjects", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSseKmsEncryptedObjects(v.SseKmsEncryptedObjects, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentSSEKMS(v *types.SSEKMS, value smithyxml.Value) error { - defer value.Close() - if v.KeyId != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KeyId", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KeyId) - } - return nil -} - -func awsRestxml_serializeDocumentSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentSSES3(v *types.SSES3, value smithyxml.Value) error { - defer value.Close() - return nil -} - -func awsRestxml_serializeDocumentStorageClassAnalysis(v *types.StorageClassAnalysis, value smithyxml.Value) error { - defer value.Close() - if v.DataExport != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DataExport", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v.DataExport, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport, value smithyxml.Value) error { - defer value.Close() - if v.Destination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Destination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAnalyticsExportDestination(v.Destination, el); err != nil { - return err - } - } - if len(v.OutputSchemaVersion) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OutputSchemaVersion", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.OutputSchemaVersion)) - } - return nil -} - -func awsRestxml_serializeDocumentTag(v *types.Tag, value smithyxml.Value) error { - defer value.Close() - if v.Key != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Key", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Key) - } - if v.Value != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Value", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Value) - } - return nil -} - -func awsRestxml_serializeDocumentTagging(v *types.Tagging, value smithyxml.Value) error { - defer value.Close() - if v.TagSet != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TagSet", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.TagSet, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTagSet(v []types.Tag, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTag(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTargetGrant(v *types.TargetGrant, value smithyxml.Value) error { - defer value.Close() - if v.Grantee != nil { - rootAttr := []smithyxml.Attr{} - rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) - if len(v.Grantee.Type) > 0 { - var av string - av = string(v.Grantee.Type) - rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) - } - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Grantee", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { - return err - } - } - if len(v.Permission) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Permission", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Permission)) - } - return nil -} - -func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Grant", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTargetGrant(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTargetObjectKeyFormat(v *types.TargetObjectKeyFormat, value smithyxml.Value) error { - defer value.Close() - if v.PartitionedPrefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "PartitionedPrefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentPartitionedPrefix(v.PartitionedPrefix, el); err != nil { - return err - } - } - if v.SimplePrefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SimplePrefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSimplePrefix(v.SimplePrefix, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error { - defer value.Close() - if len(v.AccessTier) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessTier", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.AccessTier)) - } - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - return nil -} - -func awsRestxml_serializeDocumentTieringList(v []types.Tiering, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTiering(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTopicConfiguration(v *types.TopicConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Events != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Event", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { - return err - } - } - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if v.TopicArn != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Topic", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TopicArn) - } - return nil -} - -func awsRestxml_serializeDocumentTopicConfigurationList(v []types.TopicConfiguration, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTopicConfiguration(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml.Value) error { - defer value.Close() - if v.Date != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Date", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(smithytime.FormatDateTime(*v.Date)) - } - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - if len(v.StorageClass) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClass", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.StorageClass)) - } - return nil -} - -func awsRestxml_serializeDocumentTransitionList(v []types.Transition, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTransition(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentUserMetadata(v []types.MetadataEntry, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MetadataEntry", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentMetadataEntry(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentVersioningConfiguration(v *types.VersioningConfiguration, value smithyxml.Value) error { - defer value.Close() - if len(v.MFADelete) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MfaDelete", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.MFADelete)) - } - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentWebsiteConfiguration(v *types.WebsiteConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.ErrorDocument != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ErrorDocument", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentErrorDocument(v.ErrorDocument, el); err != nil { - return err - } - } - if v.IndexDocument != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "IndexDocument", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentIndexDocument(v.IndexDocument, el); err != nil { - return err - } - } - if v.RedirectAllRequestsTo != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RedirectAllRequestsTo", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRedirectAllRequestsTo(v.RedirectAllRequestsTo, el); err != nil { - return err - } - } - if v.RoutingRules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RoutingRules", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRoutingRules(v.RoutingRules, el); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go deleted file mode 100644 index e3c2c415269e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ /dev/null @@ -1,1556 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -type AnalyticsS3ExportFileFormat string - -// Enum values for AnalyticsS3ExportFileFormat -const ( - AnalyticsS3ExportFileFormatCsv AnalyticsS3ExportFileFormat = "CSV" -) - -// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { - return []AnalyticsS3ExportFileFormat{ - "CSV", - } -} - -type ArchiveStatus string - -// Enum values for ArchiveStatus -const ( - ArchiveStatusArchiveAccess ArchiveStatus = "ARCHIVE_ACCESS" - ArchiveStatusDeepArchiveAccess ArchiveStatus = "DEEP_ARCHIVE_ACCESS" -) - -// Values returns all known values for ArchiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ArchiveStatus) Values() []ArchiveStatus { - return []ArchiveStatus{ - "ARCHIVE_ACCESS", - "DEEP_ARCHIVE_ACCESS", - } -} - -type BucketAccelerateStatus string - -// Enum values for BucketAccelerateStatus -const ( - BucketAccelerateStatusEnabled BucketAccelerateStatus = "Enabled" - BucketAccelerateStatusSuspended BucketAccelerateStatus = "Suspended" -) - -// Values returns all known values for BucketAccelerateStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { - return []BucketAccelerateStatus{ - "Enabled", - "Suspended", - } -} - -type BucketCannedACL string - -// Enum values for BucketCannedACL -const ( - BucketCannedACLPrivate BucketCannedACL = "private" - BucketCannedACLPublicRead BucketCannedACL = "public-read" - BucketCannedACLPublicReadWrite BucketCannedACL = "public-read-write" - BucketCannedACLAuthenticatedRead BucketCannedACL = "authenticated-read" -) - -// Values returns all known values for BucketCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketCannedACL) Values() []BucketCannedACL { - return []BucketCannedACL{ - "private", - "public-read", - "public-read-write", - "authenticated-read", - } -} - -type BucketLocationConstraint string - -// Enum values for BucketLocationConstraint -const ( - BucketLocationConstraintAfSouth1 BucketLocationConstraint = "af-south-1" - BucketLocationConstraintApEast1 BucketLocationConstraint = "ap-east-1" - BucketLocationConstraintApNortheast1 BucketLocationConstraint = "ap-northeast-1" - BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2" - BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3" - BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1" - BucketLocationConstraintApSouth2 BucketLocationConstraint = "ap-south-2" - BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" - BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" - BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3" - BucketLocationConstraintApSoutheast4 BucketLocationConstraint = "ap-southeast-4" - BucketLocationConstraintApSoutheast5 BucketLocationConstraint = "ap-southeast-5" - BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1" - BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1" - BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1" - BucketLocationConstraintEu BucketLocationConstraint = "EU" - BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" - BucketLocationConstraintEuCentral2 BucketLocationConstraint = "eu-central-2" - BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" - BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" - BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" - BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" - BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" - BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" - BucketLocationConstraintIlCentral1 BucketLocationConstraint = "il-central-1" - BucketLocationConstraintMeCentral1 BucketLocationConstraint = "me-central-1" - BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1" - BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1" - BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2" - BucketLocationConstraintUsGovEast1 BucketLocationConstraint = "us-gov-east-1" - BucketLocationConstraintUsGovWest1 BucketLocationConstraint = "us-gov-west-1" - BucketLocationConstraintUsWest1 BucketLocationConstraint = "us-west-1" - BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2" -) - -// Values returns all known values for BucketLocationConstraint. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketLocationConstraint) Values() []BucketLocationConstraint { - return []BucketLocationConstraint{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ap-southeast-5", - "ca-central-1", - "cn-north-1", - "cn-northwest-1", - "EU", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "il-central-1", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", - } -} - -type BucketLogsPermission string - -// Enum values for BucketLogsPermission -const ( - BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL" - BucketLogsPermissionRead BucketLogsPermission = "READ" - BucketLogsPermissionWrite BucketLogsPermission = "WRITE" -) - -// Values returns all known values for BucketLogsPermission. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketLogsPermission) Values() []BucketLogsPermission { - return []BucketLogsPermission{ - "FULL_CONTROL", - "READ", - "WRITE", - } -} - -type BucketType string - -// Enum values for BucketType -const ( - BucketTypeDirectory BucketType = "Directory" -) - -// Values returns all known values for BucketType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketType) Values() []BucketType { - return []BucketType{ - "Directory", - } -} - -type BucketVersioningStatus string - -// Enum values for BucketVersioningStatus -const ( - BucketVersioningStatusEnabled BucketVersioningStatus = "Enabled" - BucketVersioningStatusSuspended BucketVersioningStatus = "Suspended" -) - -// Values returns all known values for BucketVersioningStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketVersioningStatus) Values() []BucketVersioningStatus { - return []BucketVersioningStatus{ - "Enabled", - "Suspended", - } -} - -type ChecksumAlgorithm string - -// Enum values for ChecksumAlgorithm -const ( - ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" - ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" - ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" - ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" - ChecksumAlgorithmCrc64nvme ChecksumAlgorithm = "CRC64NVME" -) - -// Values returns all known values for ChecksumAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { - return []ChecksumAlgorithm{ - "CRC32", - "CRC32C", - "SHA1", - "SHA256", - "CRC64NVME", - } -} - -type ChecksumMode string - -// Enum values for ChecksumMode -const ( - ChecksumModeEnabled ChecksumMode = "ENABLED" -) - -// Values returns all known values for ChecksumMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ChecksumMode) Values() []ChecksumMode { - return []ChecksumMode{ - "ENABLED", - } -} - -type ChecksumType string - -// Enum values for ChecksumType -const ( - ChecksumTypeComposite ChecksumType = "COMPOSITE" - ChecksumTypeFullObject ChecksumType = "FULL_OBJECT" -) - -// Values returns all known values for ChecksumType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ChecksumType) Values() []ChecksumType { - return []ChecksumType{ - "COMPOSITE", - "FULL_OBJECT", - } -} - -type CompressionType string - -// Enum values for CompressionType -const ( - CompressionTypeNone CompressionType = "NONE" - CompressionTypeGzip CompressionType = "GZIP" - CompressionTypeBzip2 CompressionType = "BZIP2" -) - -// Values returns all known values for CompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (CompressionType) Values() []CompressionType { - return []CompressionType{ - "NONE", - "GZIP", - "BZIP2", - } -} - -type DataRedundancy string - -// Enum values for DataRedundancy -const ( - DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" - DataRedundancySingleLocalZone DataRedundancy = "SingleLocalZone" -) - -// Values returns all known values for DataRedundancy. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (DataRedundancy) Values() []DataRedundancy { - return []DataRedundancy{ - "SingleAvailabilityZone", - "SingleLocalZone", - } -} - -type DeleteMarkerReplicationStatus string - -// Enum values for DeleteMarkerReplicationStatus -const ( - DeleteMarkerReplicationStatusEnabled DeleteMarkerReplicationStatus = "Enabled" - DeleteMarkerReplicationStatusDisabled DeleteMarkerReplicationStatus = "Disabled" -) - -// Values returns all known values for DeleteMarkerReplicationStatus. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { - return []DeleteMarkerReplicationStatus{ - "Enabled", - "Disabled", - } -} - -type EncodingType string - -// Enum values for EncodingType -const ( - EncodingTypeUrl EncodingType = "url" -) - -// Values returns all known values for EncodingType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (EncodingType) Values() []EncodingType { - return []EncodingType{ - "url", - } -} - -type Event string - -// Enum values for Event -const ( - EventS3ReducedRedundancyLostObject Event = "s3:ReducedRedundancyLostObject" - EventS3ObjectCreated Event = "s3:ObjectCreated:*" - EventS3ObjectCreatedPut Event = "s3:ObjectCreated:Put" - EventS3ObjectCreatedPost Event = "s3:ObjectCreated:Post" - EventS3ObjectCreatedCopy Event = "s3:ObjectCreated:Copy" - EventS3ObjectCreatedCompleteMultipartUpload Event = "s3:ObjectCreated:CompleteMultipartUpload" - EventS3ObjectRemoved Event = "s3:ObjectRemoved:*" - EventS3ObjectRemovedDelete Event = "s3:ObjectRemoved:Delete" - EventS3ObjectRemovedDeleteMarkerCreated Event = "s3:ObjectRemoved:DeleteMarkerCreated" - EventS3ObjectRestore Event = "s3:ObjectRestore:*" - EventS3ObjectRestorePost Event = "s3:ObjectRestore:Post" - EventS3ObjectRestoreCompleted Event = "s3:ObjectRestore:Completed" - EventS3Replication Event = "s3:Replication:*" - EventS3ReplicationOperationFailedReplication Event = "s3:Replication:OperationFailedReplication" - EventS3ReplicationOperationNotTracked Event = "s3:Replication:OperationNotTracked" - EventS3ReplicationOperationMissedThreshold Event = "s3:Replication:OperationMissedThreshold" - EventS3ReplicationOperationReplicatedAfterThreshold Event = "s3:Replication:OperationReplicatedAfterThreshold" - EventS3ObjectRestoreDelete Event = "s3:ObjectRestore:Delete" - EventS3LifecycleTransition Event = "s3:LifecycleTransition" - EventS3IntelligentTiering Event = "s3:IntelligentTiering" - EventS3ObjectAclPut Event = "s3:ObjectAcl:Put" - EventS3LifecycleExpiration Event = "s3:LifecycleExpiration:*" - EventS3LifecycleExpirationDelete Event = "s3:LifecycleExpiration:Delete" - EventS3LifecycleExpirationDeleteMarkerCreated Event = "s3:LifecycleExpiration:DeleteMarkerCreated" - EventS3ObjectTagging Event = "s3:ObjectTagging:*" - EventS3ObjectTaggingPut Event = "s3:ObjectTagging:Put" - EventS3ObjectTaggingDelete Event = "s3:ObjectTagging:Delete" -) - -// Values returns all known values for Event. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Event) Values() []Event { - return []Event{ - "s3:ReducedRedundancyLostObject", - "s3:ObjectCreated:*", - "s3:ObjectCreated:Put", - "s3:ObjectCreated:Post", - "s3:ObjectCreated:Copy", - "s3:ObjectCreated:CompleteMultipartUpload", - "s3:ObjectRemoved:*", - "s3:ObjectRemoved:Delete", - "s3:ObjectRemoved:DeleteMarkerCreated", - "s3:ObjectRestore:*", - "s3:ObjectRestore:Post", - "s3:ObjectRestore:Completed", - "s3:Replication:*", - "s3:Replication:OperationFailedReplication", - "s3:Replication:OperationNotTracked", - "s3:Replication:OperationMissedThreshold", - "s3:Replication:OperationReplicatedAfterThreshold", - "s3:ObjectRestore:Delete", - "s3:LifecycleTransition", - "s3:IntelligentTiering", - "s3:ObjectAcl:Put", - "s3:LifecycleExpiration:*", - "s3:LifecycleExpiration:Delete", - "s3:LifecycleExpiration:DeleteMarkerCreated", - "s3:ObjectTagging:*", - "s3:ObjectTagging:Put", - "s3:ObjectTagging:Delete", - } -} - -type ExistingObjectReplicationStatus string - -// Enum values for ExistingObjectReplicationStatus -const ( - ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled" - ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled" -) - -// Values returns all known values for ExistingObjectReplicationStatus. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { - return []ExistingObjectReplicationStatus{ - "Enabled", - "Disabled", - } -} - -type ExpirationState string - -// Enum values for ExpirationState -const ( - ExpirationStateEnabled ExpirationState = "ENABLED" - ExpirationStateDisabled ExpirationState = "DISABLED" -) - -// Values returns all known values for ExpirationState. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ExpirationState) Values() []ExpirationState { - return []ExpirationState{ - "ENABLED", - "DISABLED", - } -} - -type ExpirationStatus string - -// Enum values for ExpirationStatus -const ( - ExpirationStatusEnabled ExpirationStatus = "Enabled" - ExpirationStatusDisabled ExpirationStatus = "Disabled" -) - -// Values returns all known values for ExpirationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ExpirationStatus) Values() []ExpirationStatus { - return []ExpirationStatus{ - "Enabled", - "Disabled", - } -} - -type ExpressionType string - -// Enum values for ExpressionType -const ( - ExpressionTypeSql ExpressionType = "SQL" -) - -// Values returns all known values for ExpressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ExpressionType) Values() []ExpressionType { - return []ExpressionType{ - "SQL", - } -} - -type FileHeaderInfo string - -// Enum values for FileHeaderInfo -const ( - FileHeaderInfoUse FileHeaderInfo = "USE" - FileHeaderInfoIgnore FileHeaderInfo = "IGNORE" - FileHeaderInfoNone FileHeaderInfo = "NONE" -) - -// Values returns all known values for FileHeaderInfo. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (FileHeaderInfo) Values() []FileHeaderInfo { - return []FileHeaderInfo{ - "USE", - "IGNORE", - "NONE", - } -} - -type FilterRuleName string - -// Enum values for FilterRuleName -const ( - FilterRuleNamePrefix FilterRuleName = "prefix" - FilterRuleNameSuffix FilterRuleName = "suffix" -) - -// Values returns all known values for FilterRuleName. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (FilterRuleName) Values() []FilterRuleName { - return []FilterRuleName{ - "prefix", - "suffix", - } -} - -type IntelligentTieringAccessTier string - -// Enum values for IntelligentTieringAccessTier -const ( - IntelligentTieringAccessTierArchiveAccess IntelligentTieringAccessTier = "ARCHIVE_ACCESS" - IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS" -) - -// Values returns all known values for IntelligentTieringAccessTier. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { - return []IntelligentTieringAccessTier{ - "ARCHIVE_ACCESS", - "DEEP_ARCHIVE_ACCESS", - } -} - -type IntelligentTieringStatus string - -// Enum values for IntelligentTieringStatus -const ( - IntelligentTieringStatusEnabled IntelligentTieringStatus = "Enabled" - IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled" -) - -// Values returns all known values for IntelligentTieringStatus. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { - return []IntelligentTieringStatus{ - "Enabled", - "Disabled", - } -} - -type InventoryConfigurationState string - -// Enum values for InventoryConfigurationState -const ( - InventoryConfigurationStateEnabled InventoryConfigurationState = "ENABLED" - InventoryConfigurationStateDisabled InventoryConfigurationState = "DISABLED" -) - -// Values returns all known values for InventoryConfigurationState. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryConfigurationState) Values() []InventoryConfigurationState { - return []InventoryConfigurationState{ - "ENABLED", - "DISABLED", - } -} - -type InventoryFormat string - -// Enum values for InventoryFormat -const ( - InventoryFormatCsv InventoryFormat = "CSV" - InventoryFormatOrc InventoryFormat = "ORC" - InventoryFormatParquet InventoryFormat = "Parquet" -) - -// Values returns all known values for InventoryFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryFormat) Values() []InventoryFormat { - return []InventoryFormat{ - "CSV", - "ORC", - "Parquet", - } -} - -type InventoryFrequency string - -// Enum values for InventoryFrequency -const ( - InventoryFrequencyDaily InventoryFrequency = "Daily" - InventoryFrequencyWeekly InventoryFrequency = "Weekly" -) - -// Values returns all known values for InventoryFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryFrequency) Values() []InventoryFrequency { - return []InventoryFrequency{ - "Daily", - "Weekly", - } -} - -type InventoryIncludedObjectVersions string - -// Enum values for InventoryIncludedObjectVersions -const ( - InventoryIncludedObjectVersionsAll InventoryIncludedObjectVersions = "All" - InventoryIncludedObjectVersionsCurrent InventoryIncludedObjectVersions = "Current" -) - -// Values returns all known values for InventoryIncludedObjectVersions. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { - return []InventoryIncludedObjectVersions{ - "All", - "Current", - } -} - -type InventoryOptionalField string - -// Enum values for InventoryOptionalField -const ( - InventoryOptionalFieldSize InventoryOptionalField = "Size" - InventoryOptionalFieldLastModifiedDate InventoryOptionalField = "LastModifiedDate" - InventoryOptionalFieldStorageClass InventoryOptionalField = "StorageClass" - InventoryOptionalFieldETag InventoryOptionalField = "ETag" - InventoryOptionalFieldIsMultipartUploaded InventoryOptionalField = "IsMultipartUploaded" - InventoryOptionalFieldReplicationStatus InventoryOptionalField = "ReplicationStatus" - InventoryOptionalFieldEncryptionStatus InventoryOptionalField = "EncryptionStatus" - InventoryOptionalFieldObjectLockRetainUntilDate InventoryOptionalField = "ObjectLockRetainUntilDate" - InventoryOptionalFieldObjectLockMode InventoryOptionalField = "ObjectLockMode" - InventoryOptionalFieldObjectLockLegalHoldStatus InventoryOptionalField = "ObjectLockLegalHoldStatus" - InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier" - InventoryOptionalFieldBucketKeyStatus InventoryOptionalField = "BucketKeyStatus" - InventoryOptionalFieldChecksumAlgorithm InventoryOptionalField = "ChecksumAlgorithm" - InventoryOptionalFieldObjectAccessControlList InventoryOptionalField = "ObjectAccessControlList" - InventoryOptionalFieldObjectOwner InventoryOptionalField = "ObjectOwner" -) - -// Values returns all known values for InventoryOptionalField. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryOptionalField) Values() []InventoryOptionalField { - return []InventoryOptionalField{ - "Size", - "LastModifiedDate", - "StorageClass", - "ETag", - "IsMultipartUploaded", - "ReplicationStatus", - "EncryptionStatus", - "ObjectLockRetainUntilDate", - "ObjectLockMode", - "ObjectLockLegalHoldStatus", - "IntelligentTieringAccessTier", - "BucketKeyStatus", - "ChecksumAlgorithm", - "ObjectAccessControlList", - "ObjectOwner", - } -} - -type JSONType string - -// Enum values for JSONType -const ( - JSONTypeDocument JSONType = "DOCUMENT" - JSONTypeLines JSONType = "LINES" -) - -// Values returns all known values for JSONType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (JSONType) Values() []JSONType { - return []JSONType{ - "DOCUMENT", - "LINES", - } -} - -type LocationType string - -// Enum values for LocationType -const ( - LocationTypeAvailabilityZone LocationType = "AvailabilityZone" - LocationTypeLocalZone LocationType = "LocalZone" -) - -// Values returns all known values for LocationType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (LocationType) Values() []LocationType { - return []LocationType{ - "AvailabilityZone", - "LocalZone", - } -} - -type MetadataDirective string - -// Enum values for MetadataDirective -const ( - MetadataDirectiveCopy MetadataDirective = "COPY" - MetadataDirectiveReplace MetadataDirective = "REPLACE" -) - -// Values returns all known values for MetadataDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MetadataDirective) Values() []MetadataDirective { - return []MetadataDirective{ - "COPY", - "REPLACE", - } -} - -type MetricsStatus string - -// Enum values for MetricsStatus -const ( - MetricsStatusEnabled MetricsStatus = "Enabled" - MetricsStatusDisabled MetricsStatus = "Disabled" -) - -// Values returns all known values for MetricsStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MetricsStatus) Values() []MetricsStatus { - return []MetricsStatus{ - "Enabled", - "Disabled", - } -} - -type MFADelete string - -// Enum values for MFADelete -const ( - MFADeleteEnabled MFADelete = "Enabled" - MFADeleteDisabled MFADelete = "Disabled" -) - -// Values returns all known values for MFADelete. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MFADelete) Values() []MFADelete { - return []MFADelete{ - "Enabled", - "Disabled", - } -} - -type MFADeleteStatus string - -// Enum values for MFADeleteStatus -const ( - MFADeleteStatusEnabled MFADeleteStatus = "Enabled" - MFADeleteStatusDisabled MFADeleteStatus = "Disabled" -) - -// Values returns all known values for MFADeleteStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MFADeleteStatus) Values() []MFADeleteStatus { - return []MFADeleteStatus{ - "Enabled", - "Disabled", - } -} - -type ObjectAttributes string - -// Enum values for ObjectAttributes -const ( - ObjectAttributesEtag ObjectAttributes = "ETag" - ObjectAttributesChecksum ObjectAttributes = "Checksum" - ObjectAttributesObjectParts ObjectAttributes = "ObjectParts" - ObjectAttributesStorageClass ObjectAttributes = "StorageClass" - ObjectAttributesObjectSize ObjectAttributes = "ObjectSize" -) - -// Values returns all known values for ObjectAttributes. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectAttributes) Values() []ObjectAttributes { - return []ObjectAttributes{ - "ETag", - "Checksum", - "ObjectParts", - "StorageClass", - "ObjectSize", - } -} - -type ObjectCannedACL string - -// Enum values for ObjectCannedACL -const ( - ObjectCannedACLPrivate ObjectCannedACL = "private" - ObjectCannedACLPublicRead ObjectCannedACL = "public-read" - ObjectCannedACLPublicReadWrite ObjectCannedACL = "public-read-write" - ObjectCannedACLAuthenticatedRead ObjectCannedACL = "authenticated-read" - ObjectCannedACLAwsExecRead ObjectCannedACL = "aws-exec-read" - ObjectCannedACLBucketOwnerRead ObjectCannedACL = "bucket-owner-read" - ObjectCannedACLBucketOwnerFullControl ObjectCannedACL = "bucket-owner-full-control" -) - -// Values returns all known values for ObjectCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectCannedACL) Values() []ObjectCannedACL { - return []ObjectCannedACL{ - "private", - "public-read", - "public-read-write", - "authenticated-read", - "aws-exec-read", - "bucket-owner-read", - "bucket-owner-full-control", - } -} - -type ObjectLockEnabled string - -// Enum values for ObjectLockEnabled -const ( - ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled" -) - -// Values returns all known values for ObjectLockEnabled. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockEnabled) Values() []ObjectLockEnabled { - return []ObjectLockEnabled{ - "Enabled", - } -} - -type ObjectLockLegalHoldStatus string - -// Enum values for ObjectLockLegalHoldStatus -const ( - ObjectLockLegalHoldStatusOn ObjectLockLegalHoldStatus = "ON" - ObjectLockLegalHoldStatusOff ObjectLockLegalHoldStatus = "OFF" -) - -// Values returns all known values for ObjectLockLegalHoldStatus. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { - return []ObjectLockLegalHoldStatus{ - "ON", - "OFF", - } -} - -type ObjectLockMode string - -// Enum values for ObjectLockMode -const ( - ObjectLockModeGovernance ObjectLockMode = "GOVERNANCE" - ObjectLockModeCompliance ObjectLockMode = "COMPLIANCE" -) - -// Values returns all known values for ObjectLockMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockMode) Values() []ObjectLockMode { - return []ObjectLockMode{ - "GOVERNANCE", - "COMPLIANCE", - } -} - -type ObjectLockRetentionMode string - -// Enum values for ObjectLockRetentionMode -const ( - ObjectLockRetentionModeGovernance ObjectLockRetentionMode = "GOVERNANCE" - ObjectLockRetentionModeCompliance ObjectLockRetentionMode = "COMPLIANCE" -) - -// Values returns all known values for ObjectLockRetentionMode. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { - return []ObjectLockRetentionMode{ - "GOVERNANCE", - "COMPLIANCE", - } -} - -type ObjectOwnership string - -// Enum values for ObjectOwnership -const ( - ObjectOwnershipBucketOwnerPreferred ObjectOwnership = "BucketOwnerPreferred" - ObjectOwnershipObjectWriter ObjectOwnership = "ObjectWriter" - ObjectOwnershipBucketOwnerEnforced ObjectOwnership = "BucketOwnerEnforced" -) - -// Values returns all known values for ObjectOwnership. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectOwnership) Values() []ObjectOwnership { - return []ObjectOwnership{ - "BucketOwnerPreferred", - "ObjectWriter", - "BucketOwnerEnforced", - } -} - -type ObjectStorageClass string - -// Enum values for ObjectStorageClass -const ( - ObjectStorageClassStandard ObjectStorageClass = "STANDARD" - ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY" - ObjectStorageClassGlacier ObjectStorageClass = "GLACIER" - ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA" - ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA" - ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING" - ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE" - ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" - ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR" - ObjectStorageClassSnow ObjectStorageClass = "SNOW" - ObjectStorageClassExpressOnezone ObjectStorageClass = "EXPRESS_ONEZONE" - ObjectStorageClassFsxOpenzfs ObjectStorageClass = "FSX_OPENZFS" -) - -// Values returns all known values for ObjectStorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectStorageClass) Values() []ObjectStorageClass { - return []ObjectStorageClass{ - "STANDARD", - "REDUCED_REDUNDANCY", - "GLACIER", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "DEEP_ARCHIVE", - "OUTPOSTS", - "GLACIER_IR", - "SNOW", - "EXPRESS_ONEZONE", - "FSX_OPENZFS", - } -} - -type ObjectVersionStorageClass string - -// Enum values for ObjectVersionStorageClass -const ( - ObjectVersionStorageClassStandard ObjectVersionStorageClass = "STANDARD" -) - -// Values returns all known values for ObjectVersionStorageClass. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { - return []ObjectVersionStorageClass{ - "STANDARD", - } -} - -type OptionalObjectAttributes string - -// Enum values for OptionalObjectAttributes -const ( - OptionalObjectAttributesRestoreStatus OptionalObjectAttributes = "RestoreStatus" -) - -// Values returns all known values for OptionalObjectAttributes. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { - return []OptionalObjectAttributes{ - "RestoreStatus", - } -} - -type OwnerOverride string - -// Enum values for OwnerOverride -const ( - OwnerOverrideDestination OwnerOverride = "Destination" -) - -// Values returns all known values for OwnerOverride. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (OwnerOverride) Values() []OwnerOverride { - return []OwnerOverride{ - "Destination", - } -} - -type PartitionDateSource string - -// Enum values for PartitionDateSource -const ( - PartitionDateSourceEventTime PartitionDateSource = "EventTime" - PartitionDateSourceDeliveryTime PartitionDateSource = "DeliveryTime" -) - -// Values returns all known values for PartitionDateSource. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (PartitionDateSource) Values() []PartitionDateSource { - return []PartitionDateSource{ - "EventTime", - "DeliveryTime", - } -} - -type Payer string - -// Enum values for Payer -const ( - PayerRequester Payer = "Requester" - PayerBucketOwner Payer = "BucketOwner" -) - -// Values returns all known values for Payer. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Payer) Values() []Payer { - return []Payer{ - "Requester", - "BucketOwner", - } -} - -type Permission string - -// Enum values for Permission -const ( - PermissionFullControl Permission = "FULL_CONTROL" - PermissionWrite Permission = "WRITE" - PermissionWriteAcp Permission = "WRITE_ACP" - PermissionRead Permission = "READ" - PermissionReadAcp Permission = "READ_ACP" -) - -// Values returns all known values for Permission. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Permission) Values() []Permission { - return []Permission{ - "FULL_CONTROL", - "WRITE", - "WRITE_ACP", - "READ", - "READ_ACP", - } -} - -type Protocol string - -// Enum values for Protocol -const ( - ProtocolHttp Protocol = "http" - ProtocolHttps Protocol = "https" -) - -// Values returns all known values for Protocol. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Protocol) Values() []Protocol { - return []Protocol{ - "http", - "https", - } -} - -type QuoteFields string - -// Enum values for QuoteFields -const ( - QuoteFieldsAlways QuoteFields = "ALWAYS" - QuoteFieldsAsneeded QuoteFields = "ASNEEDED" -) - -// Values returns all known values for QuoteFields. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (QuoteFields) Values() []QuoteFields { - return []QuoteFields{ - "ALWAYS", - "ASNEEDED", - } -} - -type ReplicaModificationsStatus string - -// Enum values for ReplicaModificationsStatus -const ( - ReplicaModificationsStatusEnabled ReplicaModificationsStatus = "Enabled" - ReplicaModificationsStatusDisabled ReplicaModificationsStatus = "Disabled" -) - -// Values returns all known values for ReplicaModificationsStatus. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { - return []ReplicaModificationsStatus{ - "Enabled", - "Disabled", - } -} - -type ReplicationRuleStatus string - -// Enum values for ReplicationRuleStatus -const ( - ReplicationRuleStatusEnabled ReplicationRuleStatus = "Enabled" - ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled" -) - -// Values returns all known values for ReplicationRuleStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { - return []ReplicationRuleStatus{ - "Enabled", - "Disabled", - } -} - -type ReplicationStatus string - -// Enum values for ReplicationStatus -const ( - ReplicationStatusComplete ReplicationStatus = "COMPLETE" - ReplicationStatusPending ReplicationStatus = "PENDING" - ReplicationStatusFailed ReplicationStatus = "FAILED" - ReplicationStatusReplica ReplicationStatus = "REPLICA" - ReplicationStatusCompleted ReplicationStatus = "COMPLETED" -) - -// Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ReplicationStatus) Values() []ReplicationStatus { - return []ReplicationStatus{ - "COMPLETE", - "PENDING", - "FAILED", - "REPLICA", - "COMPLETED", - } -} - -type ReplicationTimeStatus string - -// Enum values for ReplicationTimeStatus -const ( - ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled" - ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled" -) - -// Values returns all known values for ReplicationTimeStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { - return []ReplicationTimeStatus{ - "Enabled", - "Disabled", - } -} - -type RequestCharged string - -// Enum values for RequestCharged -const ( - RequestChargedRequester RequestCharged = "requester" -) - -// Values returns all known values for RequestCharged. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (RequestCharged) Values() []RequestCharged { - return []RequestCharged{ - "requester", - } -} - -type RequestPayer string - -// Enum values for RequestPayer -const ( - RequestPayerRequester RequestPayer = "requester" -) - -// Values returns all known values for RequestPayer. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (RequestPayer) Values() []RequestPayer { - return []RequestPayer{ - "requester", - } -} - -type RestoreRequestType string - -// Enum values for RestoreRequestType -const ( - RestoreRequestTypeSelect RestoreRequestType = "SELECT" -) - -// Values returns all known values for RestoreRequestType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (RestoreRequestType) Values() []RestoreRequestType { - return []RestoreRequestType{ - "SELECT", - } -} - -type S3TablesBucketType string - -// Enum values for S3TablesBucketType -const ( - S3TablesBucketTypeAws S3TablesBucketType = "aws" - S3TablesBucketTypeCustomer S3TablesBucketType = "customer" -) - -// Values returns all known values for S3TablesBucketType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (S3TablesBucketType) Values() []S3TablesBucketType { - return []S3TablesBucketType{ - "aws", - "customer", - } -} - -type ServerSideEncryption string - -// Enum values for ServerSideEncryption -const ( - ServerSideEncryptionAes256 ServerSideEncryption = "AES256" - ServerSideEncryptionAwsFsx ServerSideEncryption = "aws:fsx" - ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms" - ServerSideEncryptionAwsKmsDsse ServerSideEncryption = "aws:kms:dsse" -) - -// Values returns all known values for ServerSideEncryption. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ServerSideEncryption) Values() []ServerSideEncryption { - return []ServerSideEncryption{ - "AES256", - "aws:fsx", - "aws:kms", - "aws:kms:dsse", - } -} - -type SessionMode string - -// Enum values for SessionMode -const ( - SessionModeReadOnly SessionMode = "ReadOnly" - SessionModeReadWrite SessionMode = "ReadWrite" -) - -// Values returns all known values for SessionMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (SessionMode) Values() []SessionMode { - return []SessionMode{ - "ReadOnly", - "ReadWrite", - } -} - -type SseKmsEncryptedObjectsStatus string - -// Enum values for SseKmsEncryptedObjectsStatus -const ( - SseKmsEncryptedObjectsStatusEnabled SseKmsEncryptedObjectsStatus = "Enabled" - SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled" -) - -// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { - return []SseKmsEncryptedObjectsStatus{ - "Enabled", - "Disabled", - } -} - -type StorageClass string - -// Enum values for StorageClass -const ( - StorageClassStandard StorageClass = "STANDARD" - StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY" - StorageClassStandardIa StorageClass = "STANDARD_IA" - StorageClassOnezoneIa StorageClass = "ONEZONE_IA" - StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING" - StorageClassGlacier StorageClass = "GLACIER" - StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" - StorageClassOutposts StorageClass = "OUTPOSTS" - StorageClassGlacierIr StorageClass = "GLACIER_IR" - StorageClassSnow StorageClass = "SNOW" - StorageClassExpressOnezone StorageClass = "EXPRESS_ONEZONE" - StorageClassFsxOpenzfs StorageClass = "FSX_OPENZFS" -) - -// Values returns all known values for StorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (StorageClass) Values() []StorageClass { - return []StorageClass{ - "STANDARD", - "REDUCED_REDUNDANCY", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "GLACIER", - "DEEP_ARCHIVE", - "OUTPOSTS", - "GLACIER_IR", - "SNOW", - "EXPRESS_ONEZONE", - "FSX_OPENZFS", - } -} - -type StorageClassAnalysisSchemaVersion string - -// Enum values for StorageClassAnalysisSchemaVersion -const ( - StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1" -) - -// Values returns all known values for StorageClassAnalysisSchemaVersion. Note -// that this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { - return []StorageClassAnalysisSchemaVersion{ - "V_1", - } -} - -type TableSseAlgorithm string - -// Enum values for TableSseAlgorithm -const ( - TableSseAlgorithmAwsKms TableSseAlgorithm = "aws:kms" - TableSseAlgorithmAes256 TableSseAlgorithm = "AES256" -) - -// Values returns all known values for TableSseAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (TableSseAlgorithm) Values() []TableSseAlgorithm { - return []TableSseAlgorithm{ - "aws:kms", - "AES256", - } -} - -type TaggingDirective string - -// Enum values for TaggingDirective -const ( - TaggingDirectiveCopy TaggingDirective = "COPY" - TaggingDirectiveReplace TaggingDirective = "REPLACE" -) - -// Values returns all known values for TaggingDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (TaggingDirective) Values() []TaggingDirective { - return []TaggingDirective{ - "COPY", - "REPLACE", - } -} - -type Tier string - -// Enum values for Tier -const ( - TierStandard Tier = "Standard" - TierBulk Tier = "Bulk" - TierExpedited Tier = "Expedited" -) - -// Values returns all known values for Tier. Note that this can be expanded in the -// future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Tier) Values() []Tier { - return []Tier{ - "Standard", - "Bulk", - "Expedited", - } -} - -type TransitionDefaultMinimumObjectSize string - -// Enum values for TransitionDefaultMinimumObjectSize -const ( - TransitionDefaultMinimumObjectSizeVariesByStorageClass TransitionDefaultMinimumObjectSize = "varies_by_storage_class" - TransitionDefaultMinimumObjectSizeAllStorageClasses128k TransitionDefaultMinimumObjectSize = "all_storage_classes_128K" -) - -// Values returns all known values for TransitionDefaultMinimumObjectSize. Note -// that this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (TransitionDefaultMinimumObjectSize) Values() []TransitionDefaultMinimumObjectSize { - return []TransitionDefaultMinimumObjectSize{ - "varies_by_storage_class", - "all_storage_classes_128K", - } -} - -type TransitionStorageClass string - -// Enum values for TransitionStorageClass -const ( - TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" - TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA" - TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA" - TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING" - TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE" - TransitionStorageClassGlacierIr TransitionStorageClass = "GLACIER_IR" -) - -// Values returns all known values for TransitionStorageClass. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (TransitionStorageClass) Values() []TransitionStorageClass { - return []TransitionStorageClass{ - "GLACIER", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "DEEP_ARCHIVE", - "GLACIER_IR", - } -} - -type Type string - -// Enum values for Type -const ( - TypeCanonicalUser Type = "CanonicalUser" - TypeAmazonCustomerByEmail Type = "AmazonCustomerByEmail" - TypeGroup Type = "Group" -) - -// Values returns all known values for Type. Note that this can be expanded in the -// future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Type) Values() []Type { - return []Type{ - "CanonicalUser", - "AmazonCustomerByEmail", - "Group", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go deleted file mode 100644 index 9de828f6fcf5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go +++ /dev/null @@ -1,417 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The requested bucket name is not available. The bucket namespace is shared by -// all users of the system. Select a different name and try again. -type BucketAlreadyExists struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *BucketAlreadyExists) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *BucketAlreadyExists) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *BucketAlreadyExists) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "BucketAlreadyExists" - } - return *e.ErrorCodeOverride -} -func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The bucket you tried to create already exists, and you own it. Amazon S3 -// returns this error in all Amazon Web Services Regions except in the North -// Virginia Region. For legacy compatibility, if you re-create an existing bucket -// that you already own in the North Virginia Region, Amazon S3 returns 200 OK and -// resets the bucket access control lists (ACLs). -type BucketAlreadyOwnedByYou struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *BucketAlreadyOwnedByYou) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *BucketAlreadyOwnedByYou) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *BucketAlreadyOwnedByYou) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "BucketAlreadyOwnedByYou" - } - return *e.ErrorCodeOverride -} -func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The existing object was created with a different encryption type. Subsequent -// -// write requests must include the appropriate encryption parameters in the request -// or while creating the session. -type EncryptionTypeMismatch struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *EncryptionTypeMismatch) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *EncryptionTypeMismatch) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *EncryptionTypeMismatch) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "EncryptionTypeMismatch" - } - return *e.ErrorCodeOverride -} -func (e *EncryptionTypeMismatch) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Parameters on this idempotent request are inconsistent with parameters used in -// previous request(s). -// -// For a list of error codes and more information on Amazon S3 errors, see [Error codes]. -// -// Idempotency ensures that an API request completes no more than one time. With -// an idempotent request, if the original request completes successfully, any -// subsequent retries complete successfully without performing any further actions. -// -// [Error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -type IdempotencyParameterMismatch struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *IdempotencyParameterMismatch) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *IdempotencyParameterMismatch) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *IdempotencyParameterMismatch) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "IdempotencyParameterMismatch" - } - return *e.ErrorCodeOverride -} -func (e *IdempotencyParameterMismatch) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Object is archived and inaccessible until restored. -// -// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval -// storage class, the S3 Glacier Deep Archive storage class, the S3 -// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep -// Archive Access tier, before you can retrieve the object you must first restore a -// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. -// -// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html -// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html -type InvalidObjectState struct { - Message *string - - ErrorCodeOverride *string - - StorageClass StorageClass - AccessTier IntelligentTieringAccessTier - - noSmithyDocumentSerde -} - -func (e *InvalidObjectState) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidObjectState) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidObjectState) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidObjectState" - } - return *e.ErrorCodeOverride -} -func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// You may receive this error in multiple cases. Depending on the reason for the -// error, you may receive one of the messages below: -// -// - Cannot specify both a write offset value and user-defined object metadata -// for existing objects. -// -// - Checksum Type mismatch occurred, expected checksum Type: sha1, actual -// checksum Type: crc32c. -// -// - Request body cannot be empty when 'write offset' is specified. -type InvalidRequest struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequest) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequest) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequest) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequest" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequest) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The write offset value that you specified does not match the current object -// -// size. -type InvalidWriteOffset struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidWriteOffset) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidWriteOffset) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidWriteOffset) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidWriteOffset" - } - return *e.ErrorCodeOverride -} -func (e *InvalidWriteOffset) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified bucket does not exist. -type NoSuchBucket struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NoSuchBucket) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NoSuchBucket) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NoSuchBucket) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NoSuchBucket" - } - return *e.ErrorCodeOverride -} -func (e *NoSuchBucket) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified key does not exist. -type NoSuchKey struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NoSuchKey) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NoSuchKey) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NoSuchKey) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NoSuchKey" - } - return *e.ErrorCodeOverride -} -func (e *NoSuchKey) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified multipart upload does not exist. -type NoSuchUpload struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NoSuchUpload) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NoSuchUpload) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NoSuchUpload) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NoSuchUpload" - } - return *e.ErrorCodeOverride -} -func (e *NoSuchUpload) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified content does not exist. -type NotFound struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NotFound) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NotFound) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NotFound) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NotFound" - } - return *e.ErrorCodeOverride -} -func (e *NotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// This action is not allowed against this storage tier. -type ObjectAlreadyInActiveTierError struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ObjectAlreadyInActiveTierError) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ObjectAlreadyInActiveTierError) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ObjectAlreadyInActiveTierError" - } - return *e.ErrorCodeOverride -} -func (e *ObjectAlreadyInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The source object of the COPY action is not in the active tier and is only -// stored in Amazon S3 Glacier. -type ObjectNotInActiveTierError struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ObjectNotInActiveTierError) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ObjectNotInActiveTierError) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ObjectNotInActiveTierError) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ObjectNotInActiveTierError" - } - return *e.ErrorCodeOverride -} -func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// You have attempted to add more parts than the maximum of 10000 that are -// -// allowed for this object. You can use the CopyObject operation to copy this -// object to another and then add more data to the newly copied object. -type TooManyParts struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyParts) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyParts) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyParts) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyParts" - } - return *e.ErrorCodeOverride -} -func (e *TooManyParts) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go deleted file mode 100644 index 28afadb27bec..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ /dev/null @@ -1,4907 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// Specifies the days since the initiation of an incomplete multipart upload that -// Amazon S3 will wait before permanently removing all parts of the upload. For -// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. -// -// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config -type AbortIncompleteMultipartUpload struct { - - // Specifies the number of days after which Amazon S3 aborts an incomplete - // multipart upload. - DaysAfterInitiation *int32 - - noSmithyDocumentSerde -} - -// Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide. -// -// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -type AccelerateConfiguration struct { - - // Specifies the transfer acceleration status of the bucket. - Status BucketAccelerateStatus - - noSmithyDocumentSerde -} - -// Contains the elements that set the ACL permissions for an object per grantee. -type AccessControlPolicy struct { - - // A list of grants. - Grants []Grant - - // Container for the bucket owner's display name and ID. - Owner *Owner - - noSmithyDocumentSerde -} - -// A container for information about access control for replicas. -type AccessControlTranslation struct { - - // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the - // Amazon S3 API Reference. - // - // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html - // - // This member is required. - Owner OwnerOverride - - noSmithyDocumentSerde -} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates in any -// combination, and an object must match all of the predicates for the filter to -// apply. -type AnalyticsAndOperator struct { - - // The prefix to use when evaluating an AND predicate: The prefix that an object - // must have to be included in the metrics results. - Prefix *string - - // The list of tags to use when evaluating an AND predicate. - Tags []Tag - - noSmithyDocumentSerde -} - -// Specifies the configuration and any analyses for the analytics filter of an -// Amazon S3 bucket. -type AnalyticsConfiguration struct { - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // Contains data related to access patterns to be collected and made available to - // analyze the tradeoffs between different storage classes. - // - // This member is required. - StorageClassAnalysis *StorageClassAnalysis - - // The filter used to describe a set of objects for analyses. A filter must have - // exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no - // filter is provided, all objects will be considered in any analysis. - Filter AnalyticsFilter - - noSmithyDocumentSerde -} - -// Where to publish the analytics results. -type AnalyticsExportDestination struct { - - // A destination signifying output to an S3 bucket. - // - // This member is required. - S3BucketDestination *AnalyticsS3BucketDestination - - noSmithyDocumentSerde -} - -// The filter used to describe a set of objects for analyses. A filter must have -// exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no -// filter is provided, all objects will be considered in any analysis. -// -// The following types satisfy this interface: -// -// AnalyticsFilterMemberAnd -// AnalyticsFilterMemberPrefix -// AnalyticsFilterMemberTag -type AnalyticsFilter interface { - isAnalyticsFilter() -} - -// A conjunction (logical AND) of predicates, which is used in evaluating an -// analytics filter. The operator must have at least two predicates. -type AnalyticsFilterMemberAnd struct { - Value AnalyticsAndOperator - - noSmithyDocumentSerde -} - -func (*AnalyticsFilterMemberAnd) isAnalyticsFilter() {} - -// The prefix to use when evaluating an analytics filter. -type AnalyticsFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} - -func (*AnalyticsFilterMemberPrefix) isAnalyticsFilter() {} - -// The tag to use when evaluating an analytics filter. -type AnalyticsFilterMemberTag struct { - Value Tag - - noSmithyDocumentSerde -} - -func (*AnalyticsFilterMemberTag) isAnalyticsFilter() {} - -// Contains information about where to publish the analytics results. -type AnalyticsS3BucketDestination struct { - - // The Amazon Resource Name (ARN) of the bucket to which data is exported. - // - // This member is required. - Bucket *string - - // Specifies the file format used when exporting data to Amazon S3. - // - // This member is required. - Format AnalyticsS3ExportFileFormat - - // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. - // - // Although this value is optional, we strongly recommend that you set it to help - // prevent problems if the destination bucket ownership changes. - BucketAccountId *string - - // The prefix to use when exporting data. The prefix is prepended to all results. - Prefix *string - - noSmithyDocumentSerde -} - -// In terms of implementation, a Bucket is a resource. -type Bucket struct { - - // The Amazon Resource Name (ARN) of the S3 bucket. ARNs uniquely identify Amazon - // Web Services resources across all of Amazon Web Services. - // - // This parameter is only supported for S3 directory buckets. For more - // information, see [Using tags with directory buckets]. - // - // [Using tags with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-tagging.html - BucketArn *string - - // BucketRegion indicates the Amazon Web Services region where the bucket is - // located. If the request contains at least one valid parameter, it is included in - // the response. - BucketRegion *string - - // Date the bucket was created. This date can change when making changes to your - // bucket, such as editing its bucket policy. - CreationDate *time.Time - - // The name of the bucket. - Name *string - - noSmithyDocumentSerde -} - -// Specifies the information about the bucket that will be created. For more -// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. -// -// This functionality is only supported by directory buckets. -// -// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -type BucketInfo struct { - - // The number of Zone (Availability Zone or Local Zone) that's used for redundancy - // for the bucket. - DataRedundancy DataRedundancy - - // The type of bucket. - Type BucketType - - noSmithyDocumentSerde -} - -// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For -// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide. -// -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -type BucketLifecycleConfiguration struct { - - // A lifecycle rule for individual objects in an Amazon S3 bucket. - // - // This member is required. - Rules []LifecycleRule - - noSmithyDocumentSerde -} - -// Container for logging status information. -type BucketLoggingStatus struct { - - // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API - // Reference. - // - // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html - LoggingEnabled *LoggingEnabled - - noSmithyDocumentSerde -} - -// Contains all the possible checksum or digest values for an object. -type Checksum struct { - - // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only - // present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This checksum is - // present if the object was uploaded with the CRC64NVME checksum algorithm, or if - // the object was uploaded without a checksum (and Amazon S3 added the default - // checksum, CRC64NVME , to the uploaded object). For more information, see [Checking object integrity] in - // the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - noSmithyDocumentSerde -} - -// Container for all (if there are any) keys between Prefix and the next -// occurrence of the string specified by a delimiter. CommonPrefixes lists keys -// that act like subdirectories in the directory specified by Prefix. For example, -// if the prefix is notes/ and the delimiter is a slash (/) as in -// notes/summer/july, the common prefix is notes/summer/. -type CommonPrefix struct { - - // Container for the specified common prefix. - Prefix *string - - noSmithyDocumentSerde -} - -// The container for the completed multipart upload details. -type CompletedMultipartUpload struct { - - // Array of CompletedPart data types. - // - // If you do not supply a valid Part with your request, the service sends back an - // HTTP 400 response. - Parts []CompletedPart - - noSmithyDocumentSerde -} - -// Details of the parts that were uploaded. -type CompletedPart struct { - - // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present - // if the multipart upload request was created with the CRC32 checksum algorithm. - // For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC32C checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC64NVME checksum - // algorithm to the uploaded object). For more information, see [Checking object integrity]in the Amazon S3 - // User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present - // if the multipart upload request was created with the SHA1 checksum algorithm. - // For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is - // present if the multipart upload request was created with the SHA256 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Entity tag returned when the part was uploaded. - ETag *string - - // Part number that identifies the part. This is a positive integer between 1 and - // 10,000. - // - // - General purpose buckets - In CompleteMultipartUpload , when a additional - // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , - // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the - // PartNumber must start at 1 and the part numbers must be consecutive. - // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an - // InvalidPartOrder error code. - // - // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start - // at 1 and the part numbers must be consecutive. - PartNumber *int32 - - noSmithyDocumentSerde -} - -// A container for describing a condition that must be met for the specified -// redirect to apply. For example, 1. If request is for pages in the /docs folder, -// redirect to the /documents folder. 2. If request results in HTTP error 4xx, -// redirect request to another host where you might process the error. -type Condition struct { - - // The HTTP error code when the redirect is applied. In the event of an error, if - // the error code equals this value, then the specified redirect is applied. - // Required when parent element Condition is specified and sibling KeyPrefixEquals - // is not specified. If both are specified, then both must be true for the redirect - // to be applied. - HttpErrorCodeReturnedEquals *string - - // The object key name prefix when the redirect is applied. For example, to - // redirect requests for ExamplePage.html , the key prefix will be ExamplePage.html - // . To redirect request for all pages with the prefix docs/ , the key prefix will - // be /docs , which identifies all objects in the docs/ folder. Required when the - // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals - // is not specified. If both conditions are specified, both must be true for the - // redirect to be applied. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - KeyPrefixEquals *string - - noSmithyDocumentSerde -} - -type ContinuationEvent struct { - noSmithyDocumentSerde -} - -// Container for all response elements. -type CopyObjectResult struct { - - // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This checksum is - // present if the object being copied was uploaded with the CRC64NVME checksum - // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added - // the default checksum, CRC64NVME , to the uploaded object). For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // Returns the ETag of the new object. The ETag reflects only changes to the - // contents of an object, not its metadata. - ETag *string - - // Creation date of the object. - LastModified *time.Time - - noSmithyDocumentSerde -} - -// Container for all response elements. -type CopyPartResult struct { - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32 checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC32C checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC64NVME checksum - // algorithm to the uploaded object). For more information, see [Checking object integrity]in the Amazon S3 - // User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA1 checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA256 checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Entity tag of the object. - ETag *string - - // Date and time at which the object was uploaded. - LastModified *time.Time - - noSmithyDocumentSerde -} - -// Describes the cross-origin access configuration for objects in an Amazon S3 -// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. -// -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -type CORSConfiguration struct { - - // A set of origins and methods (cross-origin access that you want to allow). You - // can add up to 100 rules to the configuration. - // - // This member is required. - CORSRules []CORSRule - - noSmithyDocumentSerde -} - -// Specifies a cross-origin access rule for an Amazon S3 bucket. -type CORSRule struct { - - // An HTTP method that you allow the origin to execute. Valid values are GET , PUT - // , HEAD , POST , and DELETE . - // - // This member is required. - AllowedMethods []string - - // One or more origins you want customers to be able to access the bucket from. - // - // This member is required. - AllowedOrigins []string - - // Headers that are specified in the Access-Control-Request-Headers header. These - // headers are allowed in a preflight OPTIONS request. In response to any preflight - // OPTIONS request, Amazon S3 returns any requested headers that are allowed. - AllowedHeaders []string - - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). - ExposeHeaders []string - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string - - // The time in seconds that your browser is to cache the preflight response for - // the specified resource. - MaxAgeSeconds *int32 - - noSmithyDocumentSerde -} - -// The configuration information for the bucket. -type CreateBucketConfiguration struct { - - // Specifies the information about the bucket that will be created. - // - // This functionality is only supported by directory buckets. - Bucket *BucketInfo - - // Specifies the location where the bucket will be created. - // - // Directory buckets - The location type is Availability Zone or Local Zone. To - // use the Local Zone location type, your account must be enabled for Local Zones. - // Otherwise, you get an HTTP 403 Forbidden error with the error code AccessDenied - // . To learn more, see [Enable accounts for Local Zones]in the Amazon S3 User Guide. - // - // This functionality is only supported by directory buckets. - // - // [Enable accounts for Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/opt-in-directory-bucket-lz.html - Location *LocationInfo - - // Specifies the Region where the bucket will be created. You might choose a - // Region to optimize latency, minimize costs, or address regulatory requirements. - // For example, if you reside in Europe, you will probably find it advantageous to - // create buckets in the Europe (Ireland) Region. - // - // If you don't specify a Region, the bucket is created in the US East (N. - // Virginia) Region (us-east-1) by default. Configurations using the value EU will - // create a bucket in eu-west-1 . - // - // For a list of the valid values for all of the Amazon Web Services Regions, see [Regions and Endpoints]. - // - // This functionality is not supported for directory buckets. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - LocationConstraint BucketLocationConstraint - - // An array of tags that you can apply to the bucket that you're creating. Tags - // are key-value pairs of metadata used to categorize and organize your buckets, - // track costs, and control access. - // - // This parameter is only supported for S3 directory buckets. For more - // information, see [Using tags with directory buckets]. - // - // [Using tags with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-tagging.html - Tags []Tag - - noSmithyDocumentSerde -} - -// Describes how an uncompressed comma-separated values (CSV)-formatted input -// object is formatted. -type CSVInput struct { - - // Specifies that CSV field values may contain quoted record delimiters and such - // records should be allowed. Default value is FALSE. Setting this value to TRUE - // may lower performance. - AllowQuotedRecordDelimiter *bool - - // A single character used to indicate that a row should be ignored when the - // character is present at the start of that row. You can specify any character to - // indicate a comment line. The default character is # . - // - // Default: # - Comments *string - - // A single character used to separate individual fields in a record. You can - // specify an arbitrary delimiter. - FieldDelimiter *string - - // Describes the first line of input. Valid values are: - // - // - NONE : First line is not a header. - // - // - IGNORE : First line is a header, but you can't use the header values to - // indicate the column in an expression. You can use column position (such as _1, - // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). - // - // - Use : First line is a header, and you can use the header value to identify a - // column in an expression ( SELECT "name" FROM OBJECT ). - FileHeaderInfo FileHeaderInfo - - // A single character used for escaping when the field delimiter is part of the - // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . - // - // Type: String - // - // Default: " - // - // Ancestors: CSV - QuoteCharacter *string - - // A single character used for escaping the quotation mark character inside an - // already escaped value. For example, the value """ a , b """ is parsed as " a , - // b " . - QuoteEscapeCharacter *string - - // A single character used to separate individual records in the input. Instead of - // the default value, you can specify an arbitrary delimiter. - RecordDelimiter *string - - noSmithyDocumentSerde -} - -// Describes how uncompressed comma-separated values (CSV)-formatted results are -// formatted. -type CSVOutput struct { - - // The value used to separate individual fields in a record. You can specify an - // arbitrary delimiter. - FieldDelimiter *string - - // A single character used for escaping when the field delimiter is part of the - // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . - QuoteCharacter *string - - // The single character used for escaping the quote character inside an already - // escaped value. - QuoteEscapeCharacter *string - - // Indicates whether to use quotation marks around output fields. - // - // - ALWAYS : Always use quotation marks for output fields. - // - // - ASNEEDED : Use quotation marks for output fields when needed. - QuoteFields QuoteFields - - // A single character used to separate individual records in the output. Instead - // of the default value, you can specify an arbitrary delimiter. - RecordDelimiter *string - - noSmithyDocumentSerde -} - -// The container element for optionally specifying the default Object Lock -// retention settings for new objects placed in the specified bucket. -// -// - The DefaultRetention settings require both a mode and a period. -// -// - The DefaultRetention period can be either Days or Years but you must select -// one. You cannot specify Days and Years at the same time. -type DefaultRetention struct { - - // The number of days that you want to specify for the default retention period. - // Must be used with Mode . - Days *int32 - - // The default Object Lock retention mode you want to apply to new objects placed - // in the specified bucket. Must be used with either Days or Years . - Mode ObjectLockRetentionMode - - // The number of years that you want to specify for the default retention period. - // Must be used with Mode . - Years *int32 - - noSmithyDocumentSerde -} - -// Container for the objects to delete. -type Delete struct { - - // The object to delete. - // - // Directory buckets - For directory buckets, an object that's composed entirely - // of whitespace characters is not supported by the DeleteObjects API operation. - // The request will receive a 400 Bad Request error and none of the objects in the - // request will be deleted. - // - // This member is required. - Objects []ObjectIdentifier - - // Element to enable quiet mode for the request. When you add this element, you - // must set its value to true . - Quiet *bool - - noSmithyDocumentSerde -} - -// Information about the deleted object. -type DeletedObject struct { - - // Indicates whether the specified object version that was permanently deleted was - // (true) or was not (false) a delete marker before deletion. In a simple DELETE, - // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. - // - // This functionality is not supported for directory buckets. - // - // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html - DeleteMarker *bool - - // The version ID of the delete marker created as a result of the DELETE - // operation. If you delete a specific object version, the value returned by this - // header is the version ID of the object version deleted. - // - // This functionality is not supported for directory buckets. - DeleteMarkerVersionId *string - - // The name of the deleted object. - Key *string - - // The version ID of the deleted object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -// Information about the delete marker. -type DeleteMarkerEntry struct { - - // Specifies whether the object is (true) or is not (false) the latest version of - // an object. - IsLatest *bool - - // The object key. - Key *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // The account that created the delete marker. - Owner *Owner - - // Version ID of an object. - VersionId *string - - noSmithyDocumentSerde -} - -// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter -// in your replication configuration, you must also include a -// DeleteMarkerReplication element. If your Filter includes a Tag element, the -// DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does -// not support replicating delete markers for tag-based rules. For an example -// configuration, see [Basic Rule Configuration]. -// -// For more information about delete marker replication, see [Basic Rule Configuration]. -// -// If you are using an earlier version of the replication configuration, Amazon S3 -// handles replication of delete markers differently. For more information, see [Backward Compatibility]. -// -// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html -// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations -type DeleteMarkerReplication struct { - - // Indicates whether to replicate delete markers. - // - // Indicates whether to replicate delete markers. - Status DeleteMarkerReplicationStatus - - noSmithyDocumentSerde -} - -// Specifies information about where to publish analysis or configuration results -// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). -type Destination struct { - - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store - // the results. - // - // This member is required. - Bucket *string - - // Specify this only in a cross-account scenario (where source and destination - // bucket owners are not the same), and you want to change replica ownership to the - // Amazon Web Services account that owns the destination bucket. If this is not - // specified in the replication configuration, the replicas are owned by same - // Amazon Web Services account that owns the source object. - AccessControlTranslation *AccessControlTranslation - - // Destination bucket owner account ID. In a cross-account scenario, if you direct - // Amazon S3 to change replica ownership to the Amazon Web Services account that - // owns the destination bucket by specifying the AccessControlTranslation - // property, this is the account ID of the destination bucket owner. For more - // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide. - // - // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html - Account *string - - // A container that provides information about encryption. If - // SourceSelectionCriteria is specified, you must specify this element. - EncryptionConfiguration *EncryptionConfiguration - - // A container specifying replication metrics-related settings enabling - // replication metrics and events. - Metrics *Metrics - - // A container specifying S3 Replication Time Control (S3 RTC), including whether - // S3 RTC is enabled and the time when all objects and operations on objects must - // be replicated. Must be specified together with a Metrics block. - ReplicationTime *ReplicationTime - - // The storage class to use when replicating objects, such as S3 Standard or - // reduced redundancy. By default, Amazon S3 uses the storage class of the source - // object to create the object replica. - // - // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3 - // API Reference. - // - // FSX_OPENZFS is not an accepted value when replicating objects. - // - // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html - StorageClass StorageClass - - noSmithyDocumentSerde -} - -// The destination information for the S3 Metadata configuration. -type DestinationResult struct { - - // The Amazon Resource Name (ARN) of the table bucket where the metadata - // configuration is stored. - TableBucketArn *string - - // The type of the table bucket where the metadata configuration is stored. The - // aws value indicates an Amazon Web Services managed table bucket, and the - // customer value indicates a customer-managed table bucket. V2 metadata - // configurations are stored in Amazon Web Services managed table buckets, and V1 - // metadata configurations are stored in customer-managed table buckets. - TableBucketType S3TablesBucketType - - // The namespace in the table bucket where the metadata tables for a metadata - // configuration are stored. - TableNamespace *string - - noSmithyDocumentSerde -} - -// Contains the type of server-side encryption used. -type Encryption struct { - - // The server-side encryption algorithm used when storing job results in Amazon S3 - // (for example, AES256, aws:kms ). - // - // This member is required. - EncryptionType ServerSideEncryption - - // If the encryption type is aws:kms , this optional value can be used to specify - // the encryption context for the restore results. - KMSContext *string - - // If the encryption type is aws:kms , this optional value specifies the ID of the - // symmetric encryption customer managed key to use for encryption of job results. - // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide. - // - // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - KMSKeyId *string - - noSmithyDocumentSerde -} - -// Specifies encryption-related information for an Amazon S3 bucket that is a -// destination for replicated objects. -// -// If you're specifying a customer managed KMS key, we recommend using a fully -// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the -// key within the requester’s account. This behavior can result in data that's -// encrypted with a KMS key that belongs to the requester, and not the bucket -// owner. -type EncryptionConfiguration struct { - - // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web - // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for - // the destination bucket. Amazon S3 uses this key to encrypt replica objects. - // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. - // - // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - ReplicaKmsKeyID *string - - noSmithyDocumentSerde -} - -// A message that indicates the request is complete and no more messages will be -// sent. You should not assume that the request is complete until the client -// receives an EndEvent . -type EndEvent struct { - noSmithyDocumentSerde -} - -// Container for all error elements. -type Error struct { - - // The error code is a string that uniquely identifies an error condition. It is - // meant to be read and understood by programs that detect and handle errors by - // type. The following is a list of Amazon S3 error codes. For more information, - // see [Error responses]. - // - // - Code: AccessDenied - // - // - Description: Access Denied - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AccountProblem - // - // - Description: There is a problem with your Amazon Web Services account that - // prevents the action from completing successfully. Contact Amazon Web Services - // Support for further assistance. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AllAccessDisabled - // - // - Description: All access to this Amazon S3 resource has been disabled. - // Contact Amazon Web Services Support for further assistance. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AmbiguousGrantByEmailAddress - // - // - Description: The email address you provided is associated with more than - // one account. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AuthorizationHeaderMalformed - // - // - Description: The authorization header you provided is invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - HTTP Status Code: N/A - // - // - Code: BadDigest - // - // - Description: The Content-MD5 you specified did not match what we received. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: BucketAlreadyExists - // - // - Description: The requested bucket name is not available. The bucket - // namespace is shared by all users of the system. Please select a different name - // and try again. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: BucketAlreadyOwnedByYou - // - // - Description: The bucket you tried to create already exists, and you own it. - // Amazon S3 returns this error in all Amazon Web Services Regions except in the - // North Virginia Region. For legacy compatibility, if you re-create an existing - // bucket that you already own in the North Virginia Region, Amazon S3 returns 200 - // OK and resets the bucket access control lists (ACLs). - // - // - Code: 409 Conflict (in all Regions except the North Virginia Region) - // - // - SOAP Fault Code Prefix: Client - // - // - Code: BucketNotEmpty - // - // - Description: The bucket you tried to delete is not empty. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: CredentialsNotSupported - // - // - Description: This request does not support credentials. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: CrossLocationLoggingProhibited - // - // - Description: Cross-location logging not allowed. Buckets in one geographic - // location cannot log information to a bucket in another location. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: EntityTooSmall - // - // - Description: Your proposed upload is smaller than the minimum allowed - // object size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: EntityTooLarge - // - // - Description: Your proposed upload exceeds the maximum allowed object size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: ExpiredToken - // - // - Description: The provided token has expired. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: IllegalVersioningConfigurationException - // - // - Description: Indicates that the versioning configuration specified in the - // request is invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: IncompleteBody - // - // - Description: You did not provide the number of bytes specified by the - // Content-Length HTTP header - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: IncorrectNumberOfFilesInPostRequest - // - // - Description: POST requires exactly one file upload per request. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InlineDataTooLarge - // - // - Description: Inline data exceeds the maximum allowed size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InternalError - // - // - Description: We encountered an internal error. Please try again. - // - // - HTTP Status Code: 500 Internal Server Error - // - // - SOAP Fault Code Prefix: Server - // - // - Code: InvalidAccessKeyId - // - // - Description: The Amazon Web Services access key ID you provided does not - // exist in our records. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidAddressingHeader - // - // - Description: You must specify the Anonymous role. - // - // - HTTP Status Code: N/A - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidArgument - // - // - Description: Invalid Argument - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidBucketName - // - // - Description: The specified bucket is not valid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidBucketState - // - // - Description: The request is not valid with the current state of the bucket. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidDigest - // - // - Description: The Content-MD5 you specified is not valid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidEncryptionAlgorithmError - // - // - Description: The encryption request you specified is not valid. The valid - // value is AES256. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidLocationConstraint - // - // - Description: The specified location constraint is not valid. For more - // information about Regions, see [How to Select a Region for Your Buckets]. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidObjectState - // - // - Description: The action is not valid for the current state of the object. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPart - // - // - Description: One or more of the specified parts could not be found. The - // part might not have been uploaded, or the specified entity tag might not have - // matched the part's entity tag. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPartOrder - // - // - Description: The list of parts was not in ascending order. Parts list must - // be specified in order by part number. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPayer - // - // - Description: All access to this object has been disabled. Please contact - // Amazon Web Services Support for further assistance. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPolicyDocument - // - // - Description: The content of the form does not meet the conditions specified - // in the policy document. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidRange - // - // - Description: The requested range cannot be satisfied. - // - // - HTTP Status Code: 416 Requested Range Not Satisfiable - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidRequest - // - // - Description: Please use AWS4-HMAC-SHA256 . - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: SOAP requests must be made over an HTTPS connection. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration is not supported for buckets - // with non-DNS compliant names. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration is not supported for buckets - // with periods (.) in their names. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual - // style requests. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration is not supported on this - // bucket. Contact Amazon Web Services Support for more information. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this - // bucket. Contact Amazon Web Services Support for more information. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidSecurity - // - // - Description: The provided security credentials are not valid. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidSOAPRequest - // - // - Description: The SOAP request body is invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidStorageClass - // - // - Description: The storage class you specified is not valid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidTargetBucketForLogging - // - // - Description: The target bucket for logging does not exist, is not owned by - // you, or does not have the appropriate grants for the log-delivery group. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidToken - // - // - Description: The provided token is malformed or otherwise invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidURI - // - // - Description: Couldn't parse the specified URI. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: KeyTooLongError - // - // - Description: Your key is too long. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MalformedACLError - // - // - Description: The XML you provided was not well-formed or did not validate - // against our published schema. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MalformedPOSTRequest - // - // - Description: The body of your POST request is not well-formed - // multipart/form-data. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MalformedXML - // - // - Description: This happens when the user sends malformed XML (XML that - // doesn't conform to the published XSD) for the configuration. The error message - // is, "The XML you provided was not well-formed or did not validate against our - // published schema." - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MaxMessageLengthExceeded - // - // - Description: Your request was too big. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MaxPostPreDataLengthExceededError - // - // - Description: Your POST request fields preceding the upload file were too - // large. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MetadataTooLarge - // - // - Description: Your metadata headers exceed the maximum allowed metadata size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MethodNotAllowed - // - // - Description: The specified method is not allowed against this resource. - // - // - HTTP Status Code: 405 Method Not Allowed - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingAttachment - // - // - Description: A SOAP attachment was expected, but none were found. - // - // - HTTP Status Code: N/A - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingContentLength - // - // - Description: You must provide the Content-Length HTTP header. - // - // - HTTP Status Code: 411 Length Required - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingRequestBodyError - // - // - Description: This happens when the user sends an empty XML document as a - // request. The error message is, "Request body is empty." - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingSecurityElement - // - // - Description: The SOAP 1.1 request is missing a security element. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingSecurityHeader - // - // - Description: Your request is missing a required header. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoLoggingStatusForKey - // - // - Description: There is no such thing as a logging status subresource for a - // key. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchBucket - // - // - Description: The specified bucket does not exist. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchBucketPolicy - // - // - Description: The specified bucket does not have a bucket policy. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchKey - // - // - Description: The specified key does not exist. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchLifecycleConfiguration - // - // - Description: The lifecycle configuration does not exist. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchUpload - // - // - Description: The specified multipart upload does not exist. The upload ID - // might be invalid, or the multipart upload might have been aborted or completed. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchVersion - // - // - Description: Indicates that the version ID specified in the request does - // not match an existing version. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NotImplemented - // - // - Description: A header you provided implies functionality that is not - // implemented. - // - // - HTTP Status Code: 501 Not Implemented - // - // - SOAP Fault Code Prefix: Server - // - // - Code: NotSignedUp - // - // - Description: Your account is not signed up for the Amazon S3 service. You - // must sign up before you can use Amazon S3. You can sign up at the following URL: - // [Amazon S3] - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: OperationAborted - // - // - Description: A conflicting conditional action is currently in progress - // against this resource. Try again. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: PermanentRedirect - // - // - Description: The bucket you are attempting to access must be addressed - // using the specified endpoint. Send all future requests to this endpoint. - // - // - HTTP Status Code: 301 Moved Permanently - // - // - SOAP Fault Code Prefix: Client - // - // - Code: PreconditionFailed - // - // - Description: At least one of the preconditions you specified did not hold. - // - // - HTTP Status Code: 412 Precondition Failed - // - // - SOAP Fault Code Prefix: Client - // - // - Code: Redirect - // - // - Description: Temporary redirect. - // - // - HTTP Status Code: 307 Moved Temporarily - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RestoreAlreadyInProgress - // - // - Description: Object restore is already in progress. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestIsNotMultiPartContent - // - // - Description: Bucket POST must be of the enclosure-type multipart/form-data. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestTimeout - // - // - Description: Your socket connection to the server was not read from or - // written to within the timeout period. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestTimeTooSkewed - // - // - Description: The difference between the request time and the server's time - // is too large. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestTorrentOfBucketError - // - // - Description: Requesting the torrent file of a bucket is not permitted. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: SignatureDoesNotMatch - // - // - Description: The request signature we calculated does not match the - // signature you provided. Check your Amazon Web Services secret access key and - // signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: ServiceUnavailable - // - // - Description: Service is unable to handle request. - // - // - HTTP Status Code: 503 Service Unavailable - // - // - SOAP Fault Code Prefix: Server - // - // - Code: SlowDown - // - // - Description: Reduce your request rate. - // - // - HTTP Status Code: 503 Slow Down - // - // - SOAP Fault Code Prefix: Server - // - // - Code: TemporaryRedirect - // - // - Description: You are being redirected to the bucket while DNS updates. - // - // - HTTP Status Code: 307 Moved Temporarily - // - // - SOAP Fault Code Prefix: Client - // - // - Code: TokenRefreshRequired - // - // - Description: The provided token must be refreshed. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: TooManyBuckets - // - // - Description: You have attempted to create more buckets than allowed. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: UnexpectedContent - // - // - Description: This request does not support content. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: UnresolvableGrantByEmailAddress - // - // - Description: The email address you provided does not match any account on - // record. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: UserKeyMustBeSpecified - // - // - Description: The bucket POST must contain the specified field name. If it - // is specified, check the order of the fields. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro - // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html - // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html - // [Amazon S3]: http://aws.amazon.com/s3 - // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html - Code *string - - // The error key. - Key *string - - // The error message contains a generic description of the error condition in - // English. It is intended for a human audience. Simple programs display the - // message directly to the end user if they encounter an error condition they don't - // know how or don't care to handle. Sophisticated programs with more exhaustive - // error handling and proper internationalization are more likely to ignore the - // error message. - Message *string - - // The version ID of the error. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -// If an S3 Metadata V1 CreateBucketMetadataTableConfiguration or V2 -// -// CreateBucketMetadataConfiguration request succeeds, but S3 Metadata was unable -// to create the table, this structure contains the error code and error message. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -type ErrorDetails struct { - - // If the V1 CreateBucketMetadataTableConfiguration request succeeds, but S3 - // Metadata was unable to create the table, this structure contains the error code. - // The possible error codes and error messages are as follows: - // - // - AccessDeniedCreatingResources - You don't have sufficient permissions to - // create the required resources. Make sure that you have - // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and - // s3tables:PutTablePolicy permissions, and then try again. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - AccessDeniedWritingToTable - Unable to write to the metadata table because - // of missing resource permissions. To fix the resource policy, Amazon S3 needs to - // create a new metadata table. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - DestinationTableNotFound - The destination table doesn't exist. To create a - // new metadata table, you must delete the metadata configuration for this bucket, - // and then create a new metadata configuration. - // - // - ServerInternalError - An internal error has occurred. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableAlreadyExists - The table that you specified already exists in the - // table bucket's namespace. Specify a different table name. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableBucketNotFound - The table bucket that you specified doesn't exist in - // this Amazon Web Services Region and account. Create or choose a different table - // bucket. To create a new metadata table, you must delete the metadata - // configuration for this bucket, and then create a new metadata configuration. - // - // If the V2 CreateBucketMetadataConfiguration request succeeds, but S3 Metadata - // was unable to create the table, this structure contains the error code. The - // possible error codes and error messages are as follows: - // - // - AccessDeniedCreatingResources - You don't have sufficient permissions to - // create the required resources. Make sure that you have - // s3tables:CreateTableBucket , s3tables:CreateNamespace , s3tables:CreateTable , - // s3tables:GetTable , s3tables:PutTablePolicy , kms:DescribeKey , and - // s3tables:PutTableEncryption permissions. Additionally, ensure that the KMS key - // used to encrypt the table still exists, is active and has a resource policy - // granting access to the S3 service principals ' - // maintenance.s3tables.amazonaws.com ' and ' metadata.s3.amazonaws.com '. To - // create a new metadata table, you must delete the metadata configuration for this - // bucket, and then create a new metadata configuration. - // - // - AccessDeniedWritingToTable - Unable to write to the metadata table because - // of missing resource permissions. To fix the resource policy, Amazon S3 needs to - // create a new metadata table. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - DestinationTableNotFound - The destination table doesn't exist. To create a - // new metadata table, you must delete the metadata configuration for this bucket, - // and then create a new metadata configuration. - // - // - ServerInternalError - An internal error has occurred. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - JournalTableAlreadyExists - A journal table already exists in the Amazon Web - // Services managed table bucket's namespace. Delete the journal table, and then - // try again. To create a new metadata table, you must delete the metadata - // configuration for this bucket, and then create a new metadata configuration. - // - // - InventoryTableAlreadyExists - An inventory table already exists in the - // Amazon Web Services managed table bucket's namespace. Delete the inventory - // table, and then try again. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - JournalTableNotAvailable - The journal table that the inventory table relies - // on has a FAILED status. An inventory table requires a journal table with an - // ACTIVE status. To create a new journal or inventory table, you must delete the - // metadata configuration for this bucket, along with any journal or inventory - // tables, and then create a new metadata configuration. - // - // - NoSuchBucket - The specified general purpose bucket does not exist. - ErrorCode *string - - // If the V1 CreateBucketMetadataTableConfiguration request succeeds, but S3 - // Metadata was unable to create the table, this structure contains the error - // message. The possible error codes and error messages are as follows: - // - // - AccessDeniedCreatingResources - You don't have sufficient permissions to - // create the required resources. Make sure that you have - // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and - // s3tables:PutTablePolicy permissions, and then try again. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - AccessDeniedWritingToTable - Unable to write to the metadata table because - // of missing resource permissions. To fix the resource policy, Amazon S3 needs to - // create a new metadata table. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - DestinationTableNotFound - The destination table doesn't exist. To create a - // new metadata table, you must delete the metadata configuration for this bucket, - // and then create a new metadata configuration. - // - // - ServerInternalError - An internal error has occurred. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableAlreadyExists - The table that you specified already exists in the - // table bucket's namespace. Specify a different table name. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableBucketNotFound - The table bucket that you specified doesn't exist in - // this Amazon Web Services Region and account. Create or choose a different table - // bucket. To create a new metadata table, you must delete the metadata - // configuration for this bucket, and then create a new metadata configuration. - // - // If the V2 CreateBucketMetadataConfiguration request succeeds, but S3 Metadata - // was unable to create the table, this structure contains the error code. The - // possible error codes and error messages are as follows: - // - // - AccessDeniedCreatingResources - You don't have sufficient permissions to - // create the required resources. Make sure that you have - // s3tables:CreateTableBucket , s3tables:CreateNamespace , s3tables:CreateTable , - // s3tables:GetTable , s3tables:PutTablePolicy , kms:DescribeKey , and - // s3tables:PutTableEncryption permissions. Additionally, ensure that the KMS key - // used to encrypt the table still exists, is active and has a resource policy - // granting access to the S3 service principals ' - // maintenance.s3tables.amazonaws.com ' and ' metadata.s3.amazonaws.com '. To - // create a new metadata table, you must delete the metadata configuration for this - // bucket, and then create a new metadata configuration. - // - // - AccessDeniedWritingToTable - Unable to write to the metadata table because - // of missing resource permissions. To fix the resource policy, Amazon S3 needs to - // create a new metadata table. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - DestinationTableNotFound - The destination table doesn't exist. To create a - // new metadata table, you must delete the metadata configuration for this bucket, - // and then create a new metadata configuration. - // - // - ServerInternalError - An internal error has occurred. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - JournalTableAlreadyExists - A journal table already exists in the Amazon Web - // Services managed table bucket's namespace. Delete the journal table, and then - // try again. To create a new metadata table, you must delete the metadata - // configuration for this bucket, and then create a new metadata configuration. - // - // - InventoryTableAlreadyExists - An inventory table already exists in the - // Amazon Web Services managed table bucket's namespace. Delete the inventory - // table, and then try again. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - JournalTableNotAvailable - The journal table that the inventory table relies - // on has a FAILED status. An inventory table requires a journal table with an - // ACTIVE status. To create a new journal or inventory table, you must delete the - // metadata configuration for this bucket, along with any journal or inventory - // tables, and then create a new metadata configuration. - // - // - NoSuchBucket - The specified general purpose bucket does not exist. - ErrorMessage *string - - noSmithyDocumentSerde -} - -// The error information. -type ErrorDocument struct { - - // The object key name to use when a 4XX class error occurs. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // This member is required. - Key *string - - noSmithyDocumentSerde -} - -// A container for specifying the configuration for Amazon EventBridge. -type EventBridgeConfiguration struct { - noSmithyDocumentSerde -} - -// Optional configuration to replicate existing source bucket objects. -// -// This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in -// the Amazon S3 User Guide. -// -// [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html -type ExistingObjectReplication struct { - - // Specifies whether Amazon S3 replicates existing source bucket objects. - // - // This member is required. - Status ExistingObjectReplicationStatus - - noSmithyDocumentSerde -} - -// Specifies the Amazon S3 object key name to filter on. An object key name is the -// name assigned to an object in your Amazon S3 bucket. You specify whether to -// filter on the suffix or prefix of the object key name. A prefix is a specific -// string of characters at the beginning of an object key name, which you can use -// to organize objects. For example, you can start the key names of related objects -// with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to -// find objects in a bucket with key names that have the same prefix. A suffix is -// similar to a prefix, but it is at the end of the object key name instead of at -// the beginning. -type FilterRule struct { - - // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications]in the - // Amazon S3 User Guide. - // - // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - Name FilterRuleName - - // The value that the filter searches for in object key names. - Value *string - - noSmithyDocumentSerde -} - -// The S3 Metadata configuration for a general purpose bucket. -type GetBucketMetadataConfigurationResult struct { - - // The metadata configuration for a general purpose bucket. - // - // This member is required. - MetadataConfigurationResult *MetadataConfigurationResult - - noSmithyDocumentSerde -} - -// The V1 S3 Metadata configuration for a general purpose bucket. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -type GetBucketMetadataTableConfigurationResult struct { - - // The V1 S3 Metadata configuration for a general purpose bucket. - // - // This member is required. - MetadataTableConfigurationResult *MetadataTableConfigurationResult - - // The status of the metadata table. The status values are: - // - // - CREATING - The metadata table is in the process of being created in the - // specified table bucket. - // - // - ACTIVE - The metadata table has been created successfully, and records are - // being delivered to the table. - // - // - FAILED - Amazon S3 is unable to create the metadata table, or Amazon S3 is - // unable to deliver records. See ErrorDetails for details. - // - // This member is required. - Status *string - - // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 - // Metadata was unable to create the table, this structure contains the error code - // and error message. - Error *ErrorDetails - - noSmithyDocumentSerde -} - -// A collection of parts associated with a multipart upload. -type GetObjectAttributesParts struct { - - // Indicates whether the returned list of parts is truncated. A value of true - // indicates that the list was truncated. A list can be truncated if the number of - // parts exceeds the limit returned in the MaxParts element. - IsTruncated *bool - - // The maximum number of parts allowed in the response. - MaxParts *int32 - - // When a list is truncated, this element specifies the last part in the list, as - // well as the value to use for the PartNumberMarker request parameter in a - // subsequent request. - NextPartNumberMarker *string - - // The marker for the current part. - PartNumberMarker *string - - // A container for elements related to a particular part. A response can contain - // zero or more Parts elements. - // - // - General purpose buckets - For GetObjectAttributes , if an additional - // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , - // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) isn't applied to the object - // specified in the request, the response doesn't return the Part element. - // - // - Directory buckets - For GetObjectAttributes , regardless of whether an - // additional checksum is applied to the object specified in the request, the - // response returns the Part element. - Parts []ObjectPart - - // The total number of parts. - TotalPartsCount *int32 - - noSmithyDocumentSerde -} - -// Container for S3 Glacier job parameters. -type GlacierJobParameters struct { - - // Retrieval tier at which the restore will be processed. - // - // This member is required. - Tier Tier - - noSmithyDocumentSerde -} - -// Container for grant information. -type Grant struct { - - // The person being granted permissions. - Grantee *Grantee - - // Specifies the permission given to the grantee. - Permission Permission - - noSmithyDocumentSerde -} - -// Container for the person being granted permissions. -type Grantee struct { - - // Type of grantee - // - // This member is required. - Type Type - - // Screen name of the grantee. - DisplayName *string - - // Email address of the grantee. - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the - // Amazon Web Services General Reference. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - EmailAddress *string - - // The canonical user ID of the grantee. - ID *string - - // URI of the grantee group. - URI *string - - noSmithyDocumentSerde -} - -// Container for the Suffix element. -type IndexDocument struct { - - // A suffix that is appended to a request that is for a directory on the website - // endpoint. (For example, if the suffix is index.html and you make a request to - // samplebucket/images/ , the data that is returned will be for the object with the - // key name images/index.html .) The suffix must not be empty and must not include - // a slash character. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // This member is required. - Suffix *string - - noSmithyDocumentSerde -} - -// Container element that identifies who initiated the multipart upload. -type Initiator struct { - - // Name of the Principal. - // - // This functionality is not supported for directory buckets. - DisplayName *string - - // If the principal is an Amazon Web Services account, it provides the Canonical - // User ID. If the principal is an IAM User, it provides a user ARN value. - // - // Directory buckets - If the principal is an Amazon Web Services account, it - // provides the Amazon Web Services account ID. If the principal is an IAM User, it - // provides a user ARN value. - ID *string - - noSmithyDocumentSerde -} - -// Describes the serialization format of the object. -type InputSerialization struct { - - // Describes the serialization of a CSV-encoded object. - CSV *CSVInput - - // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default - // Value: NONE. - CompressionType CompressionType - - // Specifies JSON as object's input serialization format. - JSON *JSONInput - - // Specifies Parquet as object's input serialization format. - Parquet *ParquetInput - - noSmithyDocumentSerde -} - -// A container for specifying S3 Intelligent-Tiering filters. The filters -// determine the subset of objects to which the rule applies. -type IntelligentTieringAndOperator struct { - - // An object key name prefix that identifies the subset of objects to which the - // configuration applies. - Prefix *string - - // All of these tags must exist in the object's tag set in order for the - // configuration to apply. - Tags []Tag - - noSmithyDocumentSerde -} - -// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. -// -// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -type IntelligentTieringConfiguration struct { - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - // Specifies the status of the configuration. - // - // This member is required. - Status IntelligentTieringStatus - - // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. - // - // This member is required. - Tierings []Tiering - - // Specifies a bucket filter. The configuration only includes objects that meet - // the filter's criteria. - Filter *IntelligentTieringFilter - - noSmithyDocumentSerde -} - -// The Filter is used to identify objects that the S3 Intelligent-Tiering -// configuration applies to. -type IntelligentTieringFilter struct { - - // A conjunction (logical AND) of predicates, which is used in evaluating a - // metrics filter. The operator must have at least two predicates, and an object - // must match all of the predicates in order for the filter to apply. - And *IntelligentTieringAndOperator - - // An object key name prefix that identifies the subset of objects to which the - // rule applies. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - Prefix *string - - // A container of a key value name pair. - Tag *Tag - - noSmithyDocumentSerde -} - -// Specifies the S3 Inventory configuration for an Amazon S3 bucket. For more -// information, see [GET Bucket inventory]in the Amazon S3 API Reference. -// -// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html -type InventoryConfiguration struct { - - // Contains information about where to publish the inventory results. - // - // This member is required. - Destination *InventoryDestination - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // Object versions to include in the inventory list. If set to All , the list - // includes all the object versions, which adds the version-related fields - // VersionId , IsLatest , and DeleteMarker to the list. If set to Current , the - // list does not contain these version-related fields. - // - // This member is required. - IncludedObjectVersions InventoryIncludedObjectVersions - - // Specifies whether the inventory is enabled or disabled. If set to True , an - // inventory list is generated. If set to False , no inventory list is generated. - // - // This member is required. - IsEnabled *bool - - // Specifies the schedule for generating inventory results. - // - // This member is required. - Schedule *InventorySchedule - - // Specifies an inventory filter. The inventory only includes objects that meet - // the filter's criteria. - Filter *InventoryFilter - - // Contains the optional fields that are included in the inventory results. - OptionalFields []InventoryOptionalField - - noSmithyDocumentSerde -} - -// Specifies the S3 Inventory configuration for an Amazon S3 bucket. -type InventoryDestination struct { - - // Contains the bucket name, file format, bucket owner (optional), and prefix - // (optional) where inventory results are published. - // - // This member is required. - S3BucketDestination *InventoryS3BucketDestination - - noSmithyDocumentSerde -} - -// Contains the type of server-side encryption used to encrypt the S3 Inventory -// results. -type InventoryEncryption struct { - - // Specifies the use of SSE-KMS to encrypt delivered inventory reports. - SSEKMS *SSEKMS - - // Specifies the use of SSE-S3 to encrypt delivered inventory reports. - SSES3 *SSES3 - - noSmithyDocumentSerde -} - -// Specifies an S3 Inventory filter. The inventory only includes objects that meet -// the filter's criteria. -type InventoryFilter struct { - - // The prefix that an object must have to be included in the inventory results. - // - // This member is required. - Prefix *string - - noSmithyDocumentSerde -} - -// Contains the bucket name, file format, bucket owner (optional), and prefix -// (optional) where S3 Inventory results are published. -type InventoryS3BucketDestination struct { - - // The Amazon Resource Name (ARN) of the bucket where inventory results will be - // published. - // - // This member is required. - Bucket *string - - // Specifies the output format of the inventory results. - // - // This member is required. - Format InventoryFormat - - // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. - // - // Although this value is optional, we strongly recommend that you set it to help - // prevent problems if the destination bucket ownership changes. - AccountId *string - - // Contains the type of server-side encryption used to encrypt the inventory - // results. - Encryption *InventoryEncryption - - // The prefix that is prepended to all inventory results. - Prefix *string - - noSmithyDocumentSerde -} - -// Specifies the schedule for generating S3 Inventory results. -type InventorySchedule struct { - - // Specifies how frequently inventory results are produced. - // - // This member is required. - Frequency InventoryFrequency - - noSmithyDocumentSerde -} - -// The inventory table configuration for an S3 Metadata configuration. -type InventoryTableConfiguration struct { - - // The configuration state of the inventory table, indicating whether the - // inventory table is enabled or disabled. - // - // This member is required. - ConfigurationState InventoryConfigurationState - - // The encryption configuration for the inventory table. - EncryptionConfiguration *MetadataTableEncryptionConfiguration - - noSmithyDocumentSerde -} - -// The inventory table configuration for an S3 Metadata configuration. -type InventoryTableConfigurationResult struct { - - // The configuration state of the inventory table, indicating whether the - // inventory table is enabled or disabled. - // - // This member is required. - ConfigurationState InventoryConfigurationState - - // If an S3 Metadata V1 CreateBucketMetadataTableConfiguration or V2 - // CreateBucketMetadataConfiguration request succeeds, but S3 Metadata was unable - // to create the table, this structure contains the error code and error message. - // - // If you created your S3 Metadata configuration before July 15, 2025, we - // recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you - // can expire journal table records and create a live inventory table. - // - // [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html - Error *ErrorDetails - - // The Amazon Resource Name (ARN) for the inventory table. - TableArn *string - - // The name of the inventory table. - TableName *string - - // The status of the inventory table. The status values are: - // - // - CREATING - The inventory table is in the process of being created in the - // specified Amazon Web Services managed table bucket. - // - // - BACKFILLING - The inventory table is in the process of being backfilled. - // When you enable the inventory table for your metadata configuration, the table - // goes through a process known as backfilling, during which Amazon S3 scans your - // general purpose bucket to retrieve the initial metadata for all objects in the - // bucket. Depending on the number of objects in your bucket, this process can take - // several hours. When the backfilling process is finished, the status of your - // inventory table changes from BACKFILLING to ACTIVE . After backfilling is - // completed, updates to your objects are reflected in the inventory table within - // one hour. - // - // - ACTIVE - The inventory table has been created successfully, and records are - // being delivered to the table. - // - // - FAILED - Amazon S3 is unable to create the inventory table, or Amazon S3 is - // unable to deliver records. - TableStatus *string - - noSmithyDocumentSerde -} - -// The specified updates to the S3 Metadata inventory table configuration. -type InventoryTableConfigurationUpdates struct { - - // The configuration state of the inventory table, indicating whether the - // inventory table is enabled or disabled. - // - // This member is required. - ConfigurationState InventoryConfigurationState - - // The encryption configuration for the inventory table. - EncryptionConfiguration *MetadataTableEncryptionConfiguration - - noSmithyDocumentSerde -} - -// The journal table configuration for an S3 Metadata configuration. -type JournalTableConfiguration struct { - - // The journal table record expiration settings for the journal table. - // - // This member is required. - RecordExpiration *RecordExpiration - - // The encryption configuration for the journal table. - EncryptionConfiguration *MetadataTableEncryptionConfiguration - - noSmithyDocumentSerde -} - -// The journal table configuration for the S3 Metadata configuration. -type JournalTableConfigurationResult struct { - - // The journal table record expiration settings for the journal table. - // - // This member is required. - RecordExpiration *RecordExpiration - - // The name of the journal table. - // - // This member is required. - TableName *string - - // The status of the journal table. The status values are: - // - // - CREATING - The journal table is in the process of being created in the - // specified table bucket. - // - // - ACTIVE - The journal table has been created successfully, and records are - // being delivered to the table. - // - // - FAILED - Amazon S3 is unable to create the journal table, or Amazon S3 is - // unable to deliver records. - // - // This member is required. - TableStatus *string - - // If an S3 Metadata V1 CreateBucketMetadataTableConfiguration or V2 - // CreateBucketMetadataConfiguration request succeeds, but S3 Metadata was unable - // to create the table, this structure contains the error code and error message. - // - // If you created your S3 Metadata configuration before July 15, 2025, we - // recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you - // can expire journal table records and create a live inventory table. - // - // [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html - Error *ErrorDetails - - // The Amazon Resource Name (ARN) for the journal table. - TableArn *string - - noSmithyDocumentSerde -} - -// The specified updates to the S3 Metadata journal table configuration. -type JournalTableConfigurationUpdates struct { - - // The journal table record expiration settings for the journal table. - // - // This member is required. - RecordExpiration *RecordExpiration - - noSmithyDocumentSerde -} - -// Specifies JSON as object's input serialization format. -type JSONInput struct { - - // The type of JSON. Valid values: Document, Lines. - Type JSONType - - noSmithyDocumentSerde -} - -// Specifies JSON as request's output serialization format. -type JSONOutput struct { - - // The value used to separate individual records in the output. If no value is - // specified, Amazon S3 uses a newline character ('\n'). - RecordDelimiter *string - - noSmithyDocumentSerde -} - -// A container for specifying the configuration for Lambda notifications. -type LambdaFunctionConfiguration struct { - - // The Amazon S3 bucket event for which to invoke the Lambda function. For more - // information, see [Supported Event Types]in the Amazon S3 User Guide. - // - // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - // - // This member is required. - Events []Event - - // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes - // when the specified event type occurs. - // - // This member is required. - LambdaFunctionArn *string - - // Specifies object key name filtering rules. For information about key name - // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. - // - // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html - Filter *NotificationConfigurationFilter - - // An optional unique identifier for configurations in a notification - // configuration. If you don't provide one, Amazon S3 will assign an ID. - Id *string - - noSmithyDocumentSerde -} - -// Container for the expiration for the lifecycle of the object. -// -// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. -// -// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html -type LifecycleExpiration struct { - - // Indicates at what date the object is to be moved or deleted. The date value - // must conform to the ISO 8601 format. The time is always midnight UTC. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - Date *time.Time - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int32 - - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false the - // policy takes no action. This cannot be specified with Days or Date in a - // Lifecycle Expiration Policy. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - ExpiredObjectDeleteMarker *bool - - noSmithyDocumentSerde -} - -// A lifecycle rule for individual objects in an Amazon S3 bucket. -// -// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. -// -// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html -type LifecycleRule struct { - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is - // not currently being applied. - // - // This member is required. - Status ExpirationStatus - - // Specifies the days since the initiation of an incomplete multipart upload that - // Amazon S3 will wait before permanently removing all parts of the upload. For - // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. - // - // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload - - // Specifies the expiration for the lifecycle of the object in the form of date, - // days and, whether the object has a delete marker. - Expiration *LifecycleExpiration - - // The Filter is used to identify objects that a Lifecycle Rule applies to. A - // Filter must have exactly one of Prefix , Tag , ObjectSizeGreaterThan , - // ObjectSizeLessThan , or And specified. Filter is required if the LifecycleRule - // does not contain a Prefix element. - // - // For more information about Tag filters, see [Adding filters to Lifecycle rules] in the Amazon S3 User Guide. - // - // Tag filters are not supported for directory buckets. - // - // [Adding filters to Lifecycle rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-filters.html - Filter *LifecycleRuleFilter - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 - // permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) to - // request that Amazon S3 delete noncurrent object versions at a specific period in - // the object's lifetime. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - NoncurrentVersionExpiration *NoncurrentVersionExpiration - - // Specifies the transition rule for the lifecycle rule that describes when - // noncurrent objects transition to a specific storage class. If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action to - // request that Amazon S3 transition noncurrent object versions to a specific - // storage class at a set period in the object's lifetime. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - NoncurrentVersionTransitions []NoncurrentVersionTransition - - // Prefix identifying one or more objects to which the rule applies. This is no - // longer used; use Filter instead. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // Deprecated: This member has been deprecated. - Prefix *string - - // Specifies when an Amazon S3 object transitions to a specified storage class. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - Transitions []Transition - - noSmithyDocumentSerde -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more -// predicates. The Lifecycle Rule will apply to any object matching all of the -// predicates configured inside the And operator. -type LifecycleRuleAndOperator struct { - - // Minimum object size to which the rule applies. - ObjectSizeGreaterThan *int64 - - // Maximum object size to which the rule applies. - ObjectSizeLessThan *int64 - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string - - // All of these tags must exist in the object's tag set in order for the rule to - // apply. - Tags []Tag - - noSmithyDocumentSerde -} - -// The Filter is used to identify objects that a Lifecycle Rule applies to. A -// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , -// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the -// Lifecycle Rule applies to all objects in the bucket. -type LifecycleRuleFilter struct { - - // This is used in a Lifecycle Rule Filter to apply a logical AND to two or more - // predicates. The Lifecycle Rule will apply to any object matching all of the - // predicates configured inside the And operator. - And *LifecycleRuleAndOperator - - // Minimum object size to which the rule applies. - ObjectSizeGreaterThan *int64 - - // Maximum object size to which the rule applies. - ObjectSizeLessThan *int64 - - // Prefix identifying one or more objects to which the rule applies. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - Prefix *string - - // This tag must exist in the object's tag set in order for the rule to apply. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - Tag *Tag - - noSmithyDocumentSerde -} - -// Specifies the location where the bucket will be created. -// -// For directory buckets, the location type is Availability Zone or Local Zone. -// For more information about directory buckets, see [Working with directory buckets]in the Amazon S3 User Guide. -// -// This functionality is only supported by directory buckets. -// -// [Working with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -type LocationInfo struct { - - // The name of the location where the bucket will be created. - // - // For directory buckets, the name of the location is the Zone ID of the - // Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An - // example AZ ID value is usw2-az1 . - Name *string - - // The type of location where the bucket will be created. - Type LocationType - - noSmithyDocumentSerde -} - -// Describes where logs are stored and the prefix that Amazon S3 assigns to all -// log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API -// Reference. -// -// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html -type LoggingEnabled struct { - - // Specifies the bucket where you want Amazon S3 to store server access logs. You - // can have your logs delivered to any bucket that you own, including the same - // bucket that is being logged. You can also configure multiple buckets to deliver - // their logs to the same target bucket. In this case, you should choose a - // different TargetPrefix for each source bucket so that the delivered log files - // can be distinguished by key. - // - // This member is required. - TargetBucket *string - - // A prefix for all log object keys. If you store log files from multiple Amazon - // S3 buckets in a single bucket, you can use a prefix to distinguish which log - // files came from which bucket. - // - // This member is required. - TargetPrefix *string - - // Container for granting information. - // - // Buckets that use the bucket owner enforced setting for Object Ownership don't - // support target grants. For more information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. - // - // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general - TargetGrants []TargetGrant - - // Amazon S3 key format for log objects. - TargetObjectKeyFormat *TargetObjectKeyFormat - - noSmithyDocumentSerde -} - -// The S3 Metadata configuration for a general purpose bucket. -type MetadataConfiguration struct { - - // The journal table configuration for a metadata configuration. - // - // This member is required. - JournalTableConfiguration *JournalTableConfiguration - - // The inventory table configuration for a metadata configuration. - InventoryTableConfiguration *InventoryTableConfiguration - - noSmithyDocumentSerde -} - -// The S3 Metadata configuration for a general purpose bucket. -type MetadataConfigurationResult struct { - - // The destination settings for a metadata configuration. - // - // This member is required. - DestinationResult *DestinationResult - - // The inventory table configuration for a metadata configuration. - InventoryTableConfigurationResult *InventoryTableConfigurationResult - - // The journal table configuration for a metadata configuration. - JournalTableConfigurationResult *JournalTableConfigurationResult - - noSmithyDocumentSerde -} - -// A metadata key-value pair to store with an object. -type MetadataEntry struct { - - // Name of the object. - Name *string - - // Value of the object. - Value *string - - noSmithyDocumentSerde -} - -// The V1 S3 Metadata configuration for a general purpose bucket. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -type MetadataTableConfiguration struct { - - // The destination information for the metadata table configuration. The - // destination table bucket must be in the same Region and Amazon Web Services - // account as the general purpose bucket. The specified metadata table name must be - // unique within the aws_s3_metadata namespace in the destination table bucket. - // - // This member is required. - S3TablesDestination *S3TablesDestination - - noSmithyDocumentSerde -} - -// The V1 S3 Metadata configuration for a general purpose bucket. The destination -// -// table bucket must be in the same Region and Amazon Web Services account as the -// general purpose bucket. The specified metadata table name must be unique within -// the aws_s3_metadata namespace in the destination table bucket. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -type MetadataTableConfigurationResult struct { - - // The destination information for the metadata table configuration. The - // destination table bucket must be in the same Region and Amazon Web Services - // account as the general purpose bucket. The specified metadata table name must be - // unique within the aws_s3_metadata namespace in the destination table bucket. - // - // This member is required. - S3TablesDestinationResult *S3TablesDestinationResult - - noSmithyDocumentSerde -} - -// The encryption settings for an S3 Metadata journal table or inventory table -// -// configuration. -type MetadataTableEncryptionConfiguration struct { - - // The encryption type specified for a metadata table. To specify server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS), use the aws:kms - // value. To specify server-side encryption with Amazon S3 managed keys (SSE-S3), - // use the AES256 value. - // - // This member is required. - SseAlgorithm TableSseAlgorithm - - // If server-side encryption with Key Management Service (KMS) keys (SSE-KMS) is - // specified, you must also specify the KMS key Amazon Resource Name (ARN). You - // must specify a customer-managed KMS key that's located in the same Region as the - // general purpose bucket that corresponds to the metadata table configuration. - KmsKeyArn *string - - noSmithyDocumentSerde -} - -// A container specifying replication metrics-related settings enabling -// -// replication metrics and events. -type Metrics struct { - - // Specifies whether the replication metrics are enabled. - // - // This member is required. - Status MetricsStatus - - // A container specifying the time threshold for emitting the - // s3:Replication:OperationMissedThreshold event. - EventThreshold *ReplicationTimeValue - - noSmithyDocumentSerde -} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates, and an object -// must match all of the predicates in order for the filter to apply. -type MetricsAndOperator struct { - - // The access point ARN used when evaluating an AND predicate. - AccessPointArn *string - - // The prefix used when evaluating an AND predicate. - Prefix *string - - // The list of tags used when evaluating an AND predicate. - Tags []Tag - - noSmithyDocumentSerde -} - -// Specifies a metrics configuration for the CloudWatch request metrics (specified -// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an -// existing metrics configuration, note that this is a full replacement of the -// existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. For more information, see [PutBucketMetricsConfiguration]. -// -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html -type MetricsConfiguration struct { - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // Specifies a metrics configuration filter. The metrics configuration will only - // include objects that meet the filter's criteria. A filter must be a prefix, an - // object tag, an access point ARN, or a conjunction (MetricsAndOperator). - Filter MetricsFilter - - noSmithyDocumentSerde -} - -// Specifies a metrics configuration filter. The metrics configuration only -// includes objects that meet the filter's criteria. A filter must be a prefix, an -// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more -// information, see [PutBucketMetricsConfiguration]. -// -// The following types satisfy this interface: -// -// MetricsFilterMemberAccessPointArn -// MetricsFilterMemberAnd -// MetricsFilterMemberPrefix -// MetricsFilterMemberTag -// -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -type MetricsFilter interface { - isMetricsFilter() -} - -// The access point ARN used when evaluating a metrics filter. -type MetricsFilterMemberAccessPointArn struct { - Value string - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates, and an object -// must match all of the predicates in order for the filter to apply. -type MetricsFilterMemberAnd struct { - Value MetricsAndOperator - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberAnd) isMetricsFilter() {} - -// The prefix used when evaluating a metrics filter. -type MetricsFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberPrefix) isMetricsFilter() {} - -// The tag used when evaluating a metrics filter. -type MetricsFilterMemberTag struct { - Value Tag - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberTag) isMetricsFilter() {} - -// Container for the MultipartUpload for the Amazon S3 object. -type MultipartUpload struct { - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm ChecksumAlgorithm - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // Date and time at which the multipart upload was initiated. - Initiated *time.Time - - // Identifies who initiated the multipart upload. - Initiator *Initiator - - // Key of the object for which the multipart upload was initiated. - Key *string - - // Specifies the owner of the object that is part of the multipart upload. - // - // Directory buckets - The bucket owner is returned as the object owner for all - // the objects. - Owner *Owner - - // The class of storage used to store the object. - // - // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 - // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 - // One Zone-Infrequent Access storage class) in Dedicated Local Zones. - StorageClass StorageClass - - // Upload ID that identifies the multipart upload. - UploadId *string - - noSmithyDocumentSerde -} - -// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 -// permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) to -// request that Amazon S3 delete noncurrent object versions at a specific period in -// the object's lifetime. -// -// This parameter applies to general purpose buckets only. It is not supported for -// directory bucket lifecycle configurations. -type NoncurrentVersionExpiration struct { - - // Specifies how many noncurrent versions Amazon S3 will retain. You can specify - // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any - // additional noncurrent versions beyond the specified number to retain. For more - // information about noncurrent versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html - NewerNoncurrentVersions *int32 - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. The value must be a non-zero positive integer. - // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent]in the Amazon S3 - // User Guide. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations - NoncurrentDays *int32 - - noSmithyDocumentSerde -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA , ONEZONE_IA , INTELLIGENT_TIERING , GLACIER_IR , -// GLACIER , or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled -// (or versioning is suspended), you can set this action to request that Amazon S3 -// transition noncurrent object versions to the STANDARD_IA , ONEZONE_IA , -// INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE storage class at a -// specific period in the object's lifetime. -type NoncurrentVersionTransition struct { - - // Specifies how many noncurrent versions Amazon S3 will retain in the same - // storage class before transitioning objects. You can specify up to 100 noncurrent - // versions to retain. Amazon S3 will transition any additional noncurrent versions - // beyond the specified number to retain. For more information about noncurrent - // versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. - // - // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html - NewerNoncurrentVersions *int32 - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]in the Amazon S3 User Guide. - // - // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations - NoncurrentDays *int32 - - // The class of storage used to store the object. - StorageClass TransitionStorageClass - - noSmithyDocumentSerde -} - -// A container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off for the bucket. -type NotificationConfiguration struct { - - // Enables delivery of events to Amazon EventBridge. - EventBridgeConfiguration *EventBridgeConfiguration - - // Describes the Lambda functions to invoke and the events for which to invoke - // them. - LambdaFunctionConfigurations []LambdaFunctionConfiguration - - // The Amazon Simple Queue Service queues to publish messages to and the events - // for which to publish messages. - QueueConfigurations []QueueConfiguration - - // The topic to which notifications are sent and the events for which - // notifications are generated. - TopicConfigurations []TopicConfiguration - - noSmithyDocumentSerde -} - -// Specifies object key name filtering rules. For information about key name -// filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. -// -// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html -type NotificationConfigurationFilter struct { - - // A container for object key name prefix and suffix filtering rules. - Key *S3KeyFilter - - noSmithyDocumentSerde -} - -// An object consists of data and its descriptive metadata. -type Object struct { - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm []ChecksumAlgorithm - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // The entity tag is a hash of the object. The ETag reflects changes only to the - // contents of an object, not its metadata. The ETag may or may not be an MD5 - // digest of the object data. Whether or not it is depends on how the object was - // created and how it is encrypted as described below: - // - // - Objects created by the PUT Object, POST Object, or Copy operation, or - // through the Amazon Web Services Management Console, and are encrypted by SSE-S3 - // or plaintext, have ETags that are an MD5 digest of their object data. - // - // - Objects created by the PUT Object, POST Object, or Copy operation, or - // through the Amazon Web Services Management Console, and are encrypted by SSE-C - // or SSE-KMS, have ETags that are not an MD5 digest of their object data. - // - // - If an object is created by either the Multipart Upload or Part Copy - // operation, the ETag is not an MD5 digest, regardless of the method of - // encryption. If an object is larger than 16 MB, the Amazon Web Services - // Management Console will upload or copy that object as a Multipart Upload, and - // therefore the ETag will not be an MD5 digest. - // - // Directory buckets - MD5 is not supported by directory buckets. - ETag *string - - // The name that you assign to an object. You use the object key to retrieve the - // object. - Key *string - - // Creation date of the object. - LastModified *time.Time - - // The owner of the object - // - // Directory buckets - The bucket owner is returned as the object owner. - Owner *Owner - - // Specifies the restoration status of an object. Objects in certain storage - // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see [Working with archived objects]in the - // Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. Directory buckets - // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in - // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage - // class) in Dedicated Local Zones. - // - // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html - RestoreStatus *RestoreStatus - - // Size in bytes of the object - Size *int64 - - // The class of storage used to store the object. - // - // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 - // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 - // One Zone-Infrequent Access storage class) in Dedicated Local Zones. - StorageClass ObjectStorageClass - - noSmithyDocumentSerde -} - -// Object Identifier is unique value to identify objects. -type ObjectIdentifier struct { - - // Key name of the object. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // This member is required. - Key *string - - // An entity tag (ETag) is an identifier assigned by a web server to a specific - // version of a resource found at a URL. This header field makes the request method - // conditional on ETags . - // - // Entity tags (ETags) for S3 Express One Zone are random alphanumeric strings - // unique to the object. - ETag *string - - // If present, the objects are deleted only if its modification times matches the - // provided Timestamp . - // - // This functionality is only supported for directory buckets. - LastModifiedTime *time.Time - - // If present, the objects are deleted only if its size matches the provided size - // in bytes. - // - // This functionality is only supported for directory buckets. - Size *int64 - - // Version ID for the specific version of the object to delete. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -// The container element for Object Lock configuration parameters. -type ObjectLockConfiguration struct { - - // Indicates whether this bucket has an Object Lock configuration enabled. Enable - // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. - ObjectLockEnabled ObjectLockEnabled - - // Specifies the Object Lock rule for the specified object. Enable the this rule - // when you apply ObjectLockConfiguration to a bucket. Bucket settings require - // both a mode and a period. The period can be either Days or Years but you must - // select one. You cannot specify Days and Years at the same time. - Rule *ObjectLockRule - - noSmithyDocumentSerde -} - -// A legal hold configuration for an object. -type ObjectLockLegalHold struct { - - // Indicates whether the specified object has a legal hold in place. - Status ObjectLockLegalHoldStatus - - noSmithyDocumentSerde -} - -// A Retention configuration for an object. -type ObjectLockRetention struct { - - // Indicates the Retention mode for the specified object. - Mode ObjectLockRetentionMode - - // The date on which this Object Lock Retention will expire. - RetainUntilDate *time.Time - - noSmithyDocumentSerde -} - -// The container element for an Object Lock rule. -type ObjectLockRule struct { - - // The default Object Lock retention mode and period that you want to apply to new - // objects placed in the specified bucket. Bucket settings require both a mode and - // a period. The period can be either Days or Years but you must select one. You - // cannot specify Days and Years at the same time. - DefaultRetention *DefaultRetention - - noSmithyDocumentSerde -} - -// A container for elements related to an individual part. -type ObjectPart struct { - - // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present - // if the multipart upload request was created with the CRC32 checksum algorithm. - // For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC32C checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC64NVME checksum - // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added - // the default checksum, CRC64NVME , to the uploaded object). For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present - // if the multipart upload request was created with the SHA1 checksum algorithm. - // For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is - // present if the multipart upload request was created with the SHA256 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // The part number identifying the part. This value is a positive integer between - // 1 and 10,000. - PartNumber *int32 - - // The size of the uploaded part in bytes. - Size *int64 - - noSmithyDocumentSerde -} - -// The version of an object. -type ObjectVersion struct { - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm []ChecksumAlgorithm - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // The entity tag is an MD5 hash of that version of the object. - ETag *string - - // Specifies whether the object is (true) or is not (false) the latest version of - // an object. - IsLatest *bool - - // The object key. - Key *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // Specifies the owner of the object. - Owner *Owner - - // Specifies the restoration status of an object. Objects in certain storage - // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see [Working with archived objects]in the - // Amazon S3 User Guide. - // - // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html - RestoreStatus *RestoreStatus - - // Size in bytes of the object. - Size *int64 - - // The class of storage used to store the object. - StorageClass ObjectVersionStorageClass - - // Version ID of an object. - VersionId *string - - noSmithyDocumentSerde -} - -// Describes the location where the restore job's output is stored. -type OutputLocation struct { - - // Describes an S3 location that will receive the results of the restore request. - S3 *S3Location - - noSmithyDocumentSerde -} - -// Describes how results of the Select job are serialized. -type OutputSerialization struct { - - // Describes the serialization of CSV-encoded Select results. - CSV *CSVOutput - - // Specifies JSON as request's output serialization format. - JSON *JSONOutput - - noSmithyDocumentSerde -} - -// End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning -// DisplayName . Update your applications to use canonical IDs (unique identifier -// for Amazon Web Services accounts), Amazon Web Services account ID (12 digit -// identifier) or IAM ARNs (full resource naming) as a direct replacement of -// DisplayName . -// -// This change affects the following Amazon Web Services Regions: US East (N. -// Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia -// Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) -// Region, Europe (Ireland) Region, and South America (São Paulo) Region. -// -// Container for the owner's display name and ID. -type Owner struct { - - // Container for the display name of the owner. This value is only supported in - // the following Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // This functionality is not supported for directory buckets. - DisplayName *string - - // Container for the ID of the owner. - ID *string - - noSmithyDocumentSerde -} - -// The container element for a bucket's ownership controls. -type OwnershipControls struct { - - // The container element for an ownership control rule. - // - // This member is required. - Rules []OwnershipControlsRule - - noSmithyDocumentSerde -} - -// The container element for an ownership control rule. -type OwnershipControlsRule struct { - - // The container element for object ownership for a bucket's ownership controls. - // - // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the - // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. - // - // ObjectWriter - The uploading account will own the object if the object is - // uploaded with the bucket-owner-full-control canned ACL. - // - // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer - // affect permissions. The bucket owner automatically owns and has full control - // over every object in the bucket. The bucket only accepts PUT requests that don't - // specify an ACL or specify bucket owner full control ACLs (such as the predefined - // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). - // - // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are - // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where - // you must control access for each object individually. For more information about - // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. Directory buckets - // use the bucket owner enforced setting for S3 Object Ownership. - // - // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - // - // This member is required. - ObjectOwnership ObjectOwnership - - noSmithyDocumentSerde -} - -// Container for Parquet. -type ParquetInput struct { - noSmithyDocumentSerde -} - -// Container for elements related to a part. -type Part struct { - - // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present - // if the object was uploaded with the CRC32 checksum algorithm. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is - // present if the object was uploaded with the CRC32C checksum algorithm. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC64NVME checksum - // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added - // the default checksum, CRC64NVME , to the uploaded object). For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present - // if the object was uploaded with the SHA1 checksum algorithm. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is - // present if the object was uploaded with the SHA256 checksum algorithm. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Entity tag returned when the part was uploaded. - ETag *string - - // Date and time at which the part was uploaded. - LastModified *time.Time - - // Part number identifying the part. This is a positive integer between 1 and - // 10,000. - PartNumber *int32 - - // Size in bytes of the uploaded part data. - Size *int64 - - noSmithyDocumentSerde -} - -// Amazon S3 keys for log objects are partitioned in the following format: -// -// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] -// -// PartitionedPrefix defaults to EventTime delivery when server access logs are -// delivered. -type PartitionedPrefix struct { - - // Specifies the partition date source for the partitioned prefix. - // PartitionDateSource can be EventTime or DeliveryTime . - // - // For DeliveryTime , the time in the log file names corresponds to the delivery - // time for the log files. - // - // For EventTime , The logs delivered are for a specific day only. The year, month, - // and day correspond to the day on which the event occurred, and the hour, minutes - // and seconds are set to 00 in the key. - PartitionDateSource PartitionDateSource - - noSmithyDocumentSerde -} - -// The container element for a bucket's policy status. -type PolicyStatus struct { - - // The policy status for this bucket. TRUE indicates that this bucket is public. - // FALSE indicates that the bucket is not public. - IsPublic *bool - - noSmithyDocumentSerde -} - -// This data type contains information about progress of an operation. -type Progress struct { - - // The current number of uncompressed object bytes processed. - BytesProcessed *int64 - - // The current number of bytes of records payload data returned. - BytesReturned *int64 - - // The current number of object bytes scanned. - BytesScanned *int64 - - noSmithyDocumentSerde -} - -// This data type contains information about the progress event of an operation. -type ProgressEvent struct { - - // The Progress event details. - Details *Progress - - noSmithyDocumentSerde -} - -// The PublicAccessBlock configuration that you want to apply to this Amazon S3 -// bucket. You can enable the configuration options in any combination. For more -// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in -// the Amazon S3 User Guide. -// -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -type PublicAccessBlockConfiguration struct { - - // Specifies whether Amazon S3 should block public access control lists (ACLs) for - // this bucket and objects in this bucket. Setting this element to TRUE causes the - // following behavior: - // - // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public. - // - // - PUT Object calls fail if the request includes a public ACL. - // - // - PUT Bucket calls fail if the request includes a public ACL. - // - // Enabling this setting doesn't affect existing policies or ACLs. - BlockPublicAcls *bool - - // Specifies whether Amazon S3 should block public bucket policies for this - // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT - // Bucket policy if the specified bucket policy allows public access. - // - // Enabling this setting doesn't affect existing bucket policies. - BlockPublicPolicy *bool - - // Specifies whether Amazon S3 should ignore public ACLs for this bucket and - // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore - // all public ACLs on this bucket and objects in this bucket. - // - // Enabling this setting doesn't affect the persistence of any existing ACLs and - // doesn't prevent new public ACLs from being set. - IgnorePublicAcls *bool - - // Specifies whether Amazon S3 should restrict public bucket policies for this - // bucket. Setting this element to TRUE restricts access to this bucket to only - // Amazon Web Services service principals and authorized users within this account - // if the bucket has a public policy. - // - // Enabling this setting doesn't affect previously stored bucket policies, except - // that public and cross-account access within any public bucket policy, including - // non-public delegation to specific accounts, is blocked. - RestrictPublicBuckets *bool - - noSmithyDocumentSerde -} - -// Specifies the configuration for publishing messages to an Amazon Simple Queue -// Service (Amazon SQS) queue when Amazon S3 detects specified events. -type QueueConfiguration struct { - - // A collection of bucket events for which to send notifications - // - // This member is required. - Events []Event - - // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // This member is required. - QueueArn *string - - // Specifies object key name filtering rules. For information about key name - // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. - // - // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html - Filter *NotificationConfigurationFilter - - // An optional unique identifier for configurations in a notification - // configuration. If you don't provide one, Amazon S3 will assign an ID. - Id *string - - noSmithyDocumentSerde -} - -// The journal table record expiration settings for a journal table in an S3 -// -// Metadata configuration. -type RecordExpiration struct { - - // Specifies whether journal table record expiration is enabled or disabled. - // - // This member is required. - Expiration ExpirationState - - // If you enable journal table record expiration, you can set the number of days - // to retain your journal table records. Journal table records must be retained for - // a minimum of 7 days. To set this value, specify any whole number from 7 to - // 2147483647 . For example, to retain your journal table records for one year, set - // this value to 365 . - Days *int32 - - noSmithyDocumentSerde -} - -// The container for the records event. -type RecordsEvent struct { - - // The byte array of partial, one or more result records. S3 Select doesn't - // guarantee that a record will be self-contained in one record frame. To ensure - // continuous streaming of data, S3 Select might split the same record across - // multiple record frames instead of aggregating the results in memory. Some S3 - // clients (for example, the SDKforJava) handle this behavior by creating a - // ByteStream out of the response by default. Other clients might not handle this - // behavior by default. In those cases, you must aggregate the results on the - // client side and parse the response. - Payload []byte - - noSmithyDocumentSerde -} - -// Specifies how requests are redirected. In the event of an error, you can -// specify a different error code to return. -type Redirect struct { - - // The host name to use in the redirect request. - HostName *string - - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HttpRedirectCode *string - - // Protocol to use when redirecting requests. The default is the protocol that is - // used in the original request. - Protocol Protocol - - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one - // of the siblings is present. Can be present only if ReplaceKeyWith is not - // provided. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - ReplaceKeyPrefixWith *string - - // The specific object key to use in the redirect request. For example, redirect - // request to error.html . Not required if one of the siblings is present. Can be - // present only if ReplaceKeyPrefixWith is not provided. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - ReplaceKeyWith *string - - noSmithyDocumentSerde -} - -// Specifies the redirect behavior of all requests to a website endpoint of an -// Amazon S3 bucket. -type RedirectAllRequestsTo struct { - - // Name of the host where requests are redirected. - // - // This member is required. - HostName *string - - // Protocol to use when redirecting requests. The default is the protocol that is - // used in the original request. - Protocol Protocol - - noSmithyDocumentSerde -} - -// A filter that you can specify for selection for modifications on replicas. -// Amazon S3 doesn't replicate replica modifications by default. In the latest -// version of replication configuration (when Filter is specified), you can -// specify this element and set the status to Enabled to replicate modifications -// on replicas. -// -// If you don't specify the Filter element, Amazon S3 assumes that the replication -// configuration is the earlier version, V1. In the earlier version, this element -// is not allowed. -type ReplicaModifications struct { - - // Specifies whether Amazon S3 replicates modifications on replicas. - // - // This member is required. - Status ReplicaModificationsStatus - - noSmithyDocumentSerde -} - -// A container for replication rules. You can add up to 1,000 rules. The maximum -// size of a replication configuration is 2 MB. -type ReplicationConfiguration struct { - - // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role - // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication]in - // the Amazon S3 User Guide. - // - // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html - // - // This member is required. - Role *string - - // A container for one or more replication rules. A replication configuration must - // have at least one rule and can contain a maximum of 1,000 rules. - // - // This member is required. - Rules []ReplicationRule - - noSmithyDocumentSerde -} - -// Specifies which Amazon S3 objects to replicate and where to store the replicas. -type ReplicationRule struct { - - // A container for information about the replication destination and its - // configurations including enabling the S3 Replication Time Control (S3 RTC). - // - // This member is required. - Destination *Destination - - // Specifies whether the rule is enabled. - // - // This member is required. - Status ReplicationRuleStatus - - // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter - // in your replication configuration, you must also include a - // DeleteMarkerReplication element. If your Filter includes a Tag element, the - // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does - // not support replicating delete markers for tag-based rules. For an example - // configuration, see [Basic Rule Configuration]. - // - // For more information about delete marker replication, see [Basic Rule Configuration]. - // - // If you are using an earlier version of the replication configuration, Amazon S3 - // handles replication of delete markers differently. For more information, see [Backward Compatibility]. - // - // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html - // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations - DeleteMarkerReplication *DeleteMarkerReplication - - // Optional configuration to replicate existing source bucket objects. - // - // This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in - // the Amazon S3 User Guide. - // - // [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html - ExistingObjectReplication *ExistingObjectReplication - - // A filter that identifies the subset of objects to which the replication rule - // applies. A Filter must specify exactly one Prefix , Tag , or an And child - // element. - Filter *ReplicationRuleFilter - - // A unique identifier for the rule. The maximum value is 255 characters. - ID *string - - // An object key name prefix that identifies the object or objects to which the - // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // Deprecated: This member has been deprecated. - Prefix *string - - // The priority indicates which rule has precedence whenever two or more - // replication rules conflict. Amazon S3 will attempt to replicate objects - // according to all replication rules. However, if there are two or more rules with - // the same destination bucket, then objects will be replicated according to the - // rule with the highest priority. The higher the number, the higher the priority. - // - // For more information, see [Replication] in the Amazon S3 User Guide. - // - // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html - Priority *int32 - - // A container that describes additional filters for identifying the source - // objects that you want to replicate. You can choose to enable or disable the - // replication of these objects. Currently, Amazon S3 supports only the filter that - // you can specify for objects created with server-side encryption using a customer - // managed key stored in Amazon Web Services Key Management Service (SSE-KMS). - SourceSelectionCriteria *SourceSelectionCriteria - - noSmithyDocumentSerde -} - -// A container for specifying rule filters. The filters determine the subset of -// objects to which the rule applies. This element is required only if you specify -// more than one filter. -// -// For example: -// -// - If you specify both a Prefix and a Tag filter, wrap these filters in an And -// tag. -// -// - If you specify a filter based on multiple tags, wrap the Tag elements in an -// And tag. -type ReplicationRuleAndOperator struct { - - // An object key name prefix that identifies the subset of objects to which the - // rule applies. - Prefix *string - - // An array of tags containing key and value pairs. - Tags []Tag - - noSmithyDocumentSerde -} - -// A filter that identifies the subset of objects to which the replication rule -// applies. A Filter must specify exactly one Prefix , Tag , or an And child -// element. -type ReplicationRuleFilter struct { - - // A container for specifying rule filters. The filters determine the subset of - // objects to which the rule applies. This element is required only if you specify - // more than one filter. For example: - // - // - If you specify both a Prefix and a Tag filter, wrap these filters in an And - // tag. - // - // - If you specify a filter based on multiple tags, wrap the Tag elements in an - // And tag. - And *ReplicationRuleAndOperator - - // An object key name prefix that identifies the subset of objects to which the - // rule applies. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - Prefix *string - - // A container for specifying a tag key and value. - // - // The rule applies only to objects that have the tag in their tag set. - Tag *Tag - - noSmithyDocumentSerde -} - -// A container specifying S3 Replication Time Control (S3 RTC) related -// -// information, including whether S3 RTC is enabled and the time when all objects -// and operations on objects must be replicated. Must be specified together with a -// Metrics block. -type ReplicationTime struct { - - // Specifies whether the replication time is enabled. - // - // This member is required. - Status ReplicationTimeStatus - - // A container specifying the time by which replication should be complete for - // all objects and operations on objects. - // - // This member is required. - Time *ReplicationTimeValue - - noSmithyDocumentSerde -} - -// A container specifying the time value for S3 Replication Time Control (S3 RTC) -// -// and replication metrics EventThreshold . -type ReplicationTimeValue struct { - - // Contains an integer specifying time in minutes. - // - // Valid value: 15 - Minutes *int32 - - noSmithyDocumentSerde -} - -// Container for Payer. -type RequestPaymentConfiguration struct { - - // Specifies who pays for the download and request fees. - // - // This member is required. - Payer Payer - - noSmithyDocumentSerde -} - -// Container for specifying if periodic QueryProgress messages should be sent. -type RequestProgress struct { - - // Specifies whether periodic QueryProgress frames should be sent. Valid values: - // TRUE, FALSE. Default value: FALSE. - Enabled *bool - - noSmithyDocumentSerde -} - -// Container for restore job parameters. -type RestoreRequest struct { - - // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation . - // - // The Days element is required for regular restores, and must not be provided for - // select requests. - Days *int32 - - // The optional description for the job. - Description *string - - // S3 Glacier related parameters pertaining to this job. Do not use with restores - // that specify OutputLocation . - GlacierJobParameters *GlacierJobParameters - - // Describes the location where the restore job's output is stored. - OutputLocation *OutputLocation - - // Amazon S3 Select is no longer available to new customers. Existing customers of - // Amazon S3 Select can continue to use the feature as usual. [Learn more] - // - // Describes the parameters for Select job types. - // - // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ - SelectParameters *SelectParameters - - // Retrieval tier at which the restore will be processed. - Tier Tier - - // Amazon S3 Select is no longer available to new customers. Existing customers of - // Amazon S3 Select can continue to use the feature as usual. [Learn more] - // - // Type of restore request. - // - // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ - Type RestoreRequestType - - noSmithyDocumentSerde -} - -// Specifies the restoration status of an object. Objects in certain storage -// classes must be restored before they can be retrieved. For more information -// about these storage classes and how to work with archived objects, see [Working with archived objects]in the -// Amazon S3 User Guide. -// -// This functionality is not supported for directory buckets. Directory buckets -// only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in -// Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage -// class) in Dedicated Local Zones. -// -// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html -type RestoreStatus struct { - - // Specifies whether the object is currently being restored. If the object - // restoration is in progress, the header returns the value TRUE . For example: - // - // x-amz-optional-object-attributes: IsRestoreInProgress="true" - // - // If the object restoration has completed, the header returns the value FALSE . - // For example: - // - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" - // - // If the object hasn't been restored, there is no header response. - IsRestoreInProgress *bool - - // Indicates when the restored copy will expire. This value is populated only if - // the object has already been restored. For example: - // - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" - RestoreExpiryDate *time.Time - - noSmithyDocumentSerde -} - -// Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see [Configuring advanced conditional redirects]in the Amazon S3 User Guide. -// -// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects -type RoutingRule struct { - - // Container for redirect information. You can redirect requests to another host, - // to another page, or with another protocol. In the event of an error, you can - // specify a different error code to return. - // - // This member is required. - Redirect *Redirect - - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition - - noSmithyDocumentSerde -} - -// A container for object key name prefix and suffix filtering rules. -type S3KeyFilter struct { - - // A list of containers for the key-value pair that defines the criteria for the - // filter rule. - FilterRules []FilterRule - - noSmithyDocumentSerde -} - -// Describes an Amazon S3 location that will receive the results of the restore -// request. -type S3Location struct { - - // The name of the bucket where the restore results will be placed. - // - // This member is required. - BucketName *string - - // The prefix that is prepended to the restore results for this request. - // - // This member is required. - Prefix *string - - // A list of grants that control access to the staged results. - AccessControlList []Grant - - // The canned ACL to apply to the restore results. - CannedACL ObjectCannedACL - - // Contains the type of server-side encryption used. - Encryption *Encryption - - // The class of storage used to store the restore results. - StorageClass StorageClass - - // The tag-set that is applied to the restore results. - Tagging *Tagging - - // A list of metadata to store with the restore results in S3. - UserMetadata []MetadataEntry - - noSmithyDocumentSerde -} - -// The destination information for a V1 S3 Metadata configuration. The -// -// destination table bucket must be in the same Region and Amazon Web Services -// account as the general purpose bucket. The specified metadata table name must be -// unique within the aws_s3_metadata namespace in the destination table bucket. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -type S3TablesDestination struct { - - // The Amazon Resource Name (ARN) for the table bucket that's specified as the - // destination in the metadata table configuration. The destination table bucket - // must be in the same Region and Amazon Web Services account as the general - // purpose bucket. - // - // This member is required. - TableBucketArn *string - - // The name for the metadata table in your metadata table configuration. The - // specified metadata table name must be unique within the aws_s3_metadata - // namespace in the destination table bucket. - // - // This member is required. - TableName *string - - noSmithyDocumentSerde -} - -// The destination information for a V1 S3 Metadata configuration. The -// -// destination table bucket must be in the same Region and Amazon Web Services -// account as the general purpose bucket. The specified metadata table name must be -// unique within the aws_s3_metadata namespace in the destination table bucket. -// -// If you created your S3 Metadata configuration before July 15, 2025, we -// recommend that you delete and re-create your configuration by using [CreateBucketMetadataConfiguration]so that you -// can expire journal table records and create a live inventory table. -// -// [CreateBucketMetadataConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataConfiguration.html -type S3TablesDestinationResult struct { - - // The Amazon Resource Name (ARN) for the metadata table in the metadata table - // configuration. The specified metadata table name must be unique within the - // aws_s3_metadata namespace in the destination table bucket. - // - // This member is required. - TableArn *string - - // The Amazon Resource Name (ARN) for the table bucket that's specified as the - // destination in the metadata table configuration. The destination table bucket - // must be in the same Region and Amazon Web Services account as the general - // purpose bucket. - // - // This member is required. - TableBucketArn *string - - // The name for the metadata table in your metadata table configuration. The - // specified metadata table name must be unique within the aws_s3_metadata - // namespace in the destination table bucket. - // - // This member is required. - TableName *string - - // The table bucket namespace for the metadata table in your metadata table - // configuration. This value is always aws_s3_metadata . - // - // This member is required. - TableNamespace *string - - noSmithyDocumentSerde -} - -// Specifies the byte range of the object to get the records from. A record is -// processed when its first byte is contained by the range. This parameter is -// optional, but when specified, it must not be empty. See RFC 2616, Section -// 14.35.1 about how to specify the start and end of the range. -type ScanRange struct { - - // Specifies the end of the byte range. This parameter is optional. Valid values: - // non-negative integers. The default value is one less than the size of the object - // being queried. If only the End parameter is supplied, it is interpreted to mean - // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. - End *int64 - - // Specifies the start of the byte range. This parameter is optional. Valid - // values: non-negative integers. The default value is 0. If only start is - // supplied, it means scan from that point to the end of the file. For example, 50 - // means scan from byte 50 until the end of the file. - Start *int64 - - noSmithyDocumentSerde -} - -// The container for selecting objects from a content event stream. -// -// The following types satisfy this interface: -// -// SelectObjectContentEventStreamMemberCont -// SelectObjectContentEventStreamMemberEnd -// SelectObjectContentEventStreamMemberProgress -// SelectObjectContentEventStreamMemberRecords -// SelectObjectContentEventStreamMemberStats -type SelectObjectContentEventStream interface { - isSelectObjectContentEventStream() -} - -// The Continuation Event. -type SelectObjectContentEventStreamMemberCont struct { - Value ContinuationEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberCont) isSelectObjectContentEventStream() {} - -// The End Event. -type SelectObjectContentEventStreamMemberEnd struct { - Value EndEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberEnd) isSelectObjectContentEventStream() {} - -// The Progress Event. -type SelectObjectContentEventStreamMemberProgress struct { - Value ProgressEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberProgress) isSelectObjectContentEventStream() {} - -// The Records Event. -type SelectObjectContentEventStreamMemberRecords struct { - Value RecordsEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberRecords) isSelectObjectContentEventStream() {} - -// The Stats Event. -type SelectObjectContentEventStreamMemberStats struct { - Value StatsEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} - -// Amazon S3 Select is no longer available to new customers. Existing customers of -// Amazon S3 Select can continue to use the feature as usual. [Learn more] -// -// Describes the parameters for Select job types. -// -// Learn [How to optimize querying your data in Amazon S3] using [Amazon Athena], [S3 Object Lambda], or client-side filtering. -// -// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ -// [How to optimize querying your data in Amazon S3]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ -// [Amazon Athena]: https://docs.aws.amazon.com/athena/latest/ug/what-is.html -// [S3 Object Lambda]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html -type SelectParameters struct { - - // Amazon S3 Select is no longer available to new customers. Existing customers of - // Amazon S3 Select can continue to use the feature as usual. [Learn more] - // - // The expression that is used to query the object. - // - // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ - // - // This member is required. - Expression *string - - // The type of the provided expression (for example, SQL). - // - // This member is required. - ExpressionType ExpressionType - - // Describes the serialization format of the object. - // - // This member is required. - InputSerialization *InputSerialization - - // Describes how the results of the Select job are serialized. - // - // This member is required. - OutputSerialization *OutputSerialization - - noSmithyDocumentSerde -} - -// Describes the default server-side encryption to apply to new objects in the -// bucket. If a PUT Object request doesn't specify any server-side encryption, this -// default encryption will be applied. For more information, see [PutBucketEncryption]. -// -// - General purpose buckets - If you don't specify a customer managed key at -// configuration, Amazon S3 automatically creates an Amazon Web Services KMS key ( -// aws/s3 ) in your Amazon Web Services account the first time that you add an -// object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS -// key for SSE-KMS. -// -// - Directory buckets - Your SSE-KMS configuration can only support 1 [customer managed key]per -// directory bucket's lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: SSE-S3 and SSE-KMS. -// -// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html -// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk -type ServerSideEncryptionByDefault struct { - - // Server-side encryption algorithm to use for the default encryption. - // - // For directory buckets, there are only two supported values for server-side - // encryption: AES256 and aws:kms . - // - // This member is required. - SSEAlgorithm ServerSideEncryption - - // Amazon Web Services Key Management Service (KMS) customer managed key ID to use - // for the default encryption. - // - // - General purpose buckets - This parameter is allowed if and only if - // SSEAlgorithm is set to aws:kms or aws:kms:dsse . - // - // - Directory buckets - This parameter is allowed if and only if SSEAlgorithm is - // set to aws:kms . - // - // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the - // KMS key. - // - // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // - Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // - Key Alias: alias/alias-name - // - // If you are using encryption with cross-account or Amazon Web Services service - // operations, you must use a fully qualified KMS key ARN. For more information, - // see [Using encryption for cross-account operations]. - // - // - General purpose buckets - If you're specifying a customer managed KMS key, - // we recommend using a fully qualified KMS key ARN. If you use a KMS key alias - // instead, then KMS resolves the key within the requester’s account. This behavior - // can result in data that's encrypted with a KMS key that belongs to the - // requester, and not the bucket owner. Also, if you use a key ID, you can run into - // a LogDestination undeliverable error when creating a VPC flow log. - // - // - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory - // bucket, only use the key ID or key ARN. The key alias format of the KMS key - // isn't supported. - // - // Amazon S3 only supports symmetric encryption KMS keys. For more information, - // see [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. - // - // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy - // [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - KMSMasterKeyID *string - - noSmithyDocumentSerde -} - -// Specifies the default server-side-encryption configuration. -type ServerSideEncryptionConfiguration struct { - - // Container for information about a particular server-side encryption - // configuration rule. - // - // This member is required. - Rules []ServerSideEncryptionRule - - noSmithyDocumentSerde -} - -// Specifies the default server-side encryption configuration. -// -// - General purpose buckets - If you're specifying a customer managed KMS key, -// we recommend using a fully qualified KMS key ARN. If you use a KMS key alias -// instead, then KMS resolves the key within the requester’s account. This behavior -// can result in data that's encrypted with a KMS key that belongs to the -// requester, and not the bucket owner. -// -// - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory -// bucket, only use the key ID or key ARN. The key alias format of the KMS key -// isn't supported. -// -// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -type ServerSideEncryptionRule struct { - - // Specifies the default server-side encryption to apply to new objects in the - // bucket. If a PUT Object request doesn't specify any server-side encryption, this - // default encryption will be applied. - ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault - - // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side - // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects - // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 - // to use an S3 Bucket Key. - // - // - General purpose buckets - By default, S3 Bucket Key is not enabled. For - // more information, see [Amazon S3 Bucket Keys]in the Amazon S3 User Guide. - // - // - Directory buckets - S3 Bucket Keys are always enabled for GET and PUT - // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't - // supported, when you copy SSE-KMS encrypted objects from general purpose buckets - // to directory buckets, from directory buckets to general purpose buckets, or - // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a - // call to KMS every time a copy request is made for a KMS-encrypted object. - // - // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - noSmithyDocumentSerde -} - -// The established temporary security credentials of the session. -// -// Directory buckets - These session credentials are only supported for the -// authentication and authorization of Zonal endpoint API operations on directory -// buckets. -type SessionCredentials struct { - - // A unique identifier that's associated with a secret access key. The access key - // ID and the secret access key are used together to sign programmatic Amazon Web - // Services requests cryptographically. - // - // This member is required. - AccessKeyId *string - - // Temporary security credentials expire after a specified interval. After - // temporary credentials expire, any calls that you make with those credentials - // will fail. So you must generate a new set of temporary credentials. Temporary - // credentials cannot be extended or refreshed beyond the original specified - // interval. - // - // This member is required. - Expiration *time.Time - - // A key that's used with the access key ID to cryptographically sign programmatic - // Amazon Web Services requests. Signing a request identifies the sender and - // prevents the request from being altered. - // - // This member is required. - SecretAccessKey *string - - // A part of the temporary security credentials. The session token is used to - // validate the temporary security credentials. - // - // This member is required. - SessionToken *string - - noSmithyDocumentSerde -} - -// To use simple format for S3 keys for log objects, set SimplePrefix to an empty -// object. -// -// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] -type SimplePrefix struct { - noSmithyDocumentSerde -} - -// A container that describes additional filters for identifying the source -// objects that you want to replicate. You can choose to enable or disable the -// replication of these objects. Currently, Amazon S3 supports only the filter that -// you can specify for objects created with server-side encryption using a customer -// managed key stored in Amazon Web Services Key Management Service (SSE-KMS). -type SourceSelectionCriteria struct { - - // A filter that you can specify for selections for modifications on replicas. - // Amazon S3 doesn't replicate replica modifications by default. In the latest - // version of replication configuration (when Filter is specified), you can - // specify this element and set the status to Enabled to replicate modifications - // on replicas. - // - // If you don't specify the Filter element, Amazon S3 assumes that the replication - // configuration is the earlier version, V1. In the earlier version, this element - // is not allowed - ReplicaModifications *ReplicaModifications - - // A container for filter information for the selection of Amazon S3 objects - // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria - // in the replication configuration, this element is required. - SseKmsEncryptedObjects *SseKmsEncryptedObjects - - noSmithyDocumentSerde -} - -// Specifies the use of SSE-KMS to encrypt delivered inventory reports. -type SSEKMS struct { - - // Specifies the ID of the Key Management Service (KMS) symmetric encryption - // customer managed key to use for encrypting inventory reports. - // - // This member is required. - KeyId *string - - noSmithyDocumentSerde -} - -// A container for filter information for the selection of S3 objects encrypted -// with Amazon Web Services KMS. -type SseKmsEncryptedObjects struct { - - // Specifies whether Amazon S3 replicates objects created with server-side - // encryption using an Amazon Web Services KMS key stored in Amazon Web Services - // Key Management Service. - // - // This member is required. - Status SseKmsEncryptedObjectsStatus - - noSmithyDocumentSerde -} - -// Specifies the use of SSE-S3 to encrypt delivered inventory reports. -type SSES3 struct { - noSmithyDocumentSerde -} - -// Container for the stats details. -type Stats struct { - - // The total number of uncompressed object bytes processed. - BytesProcessed *int64 - - // The total number of bytes of records payload data returned. - BytesReturned *int64 - - // The total number of object bytes scanned. - BytesScanned *int64 - - noSmithyDocumentSerde -} - -// Container for the Stats Event. -type StatsEvent struct { - - // The Stats event details. - Details *Stats - - noSmithyDocumentSerde -} - -// Specifies data related to access patterns to be collected and made available to -// analyze the tradeoffs between different storage classes for an Amazon S3 bucket. -type StorageClassAnalysis struct { - - // Specifies how data related to the storage class analysis for an Amazon S3 - // bucket should be exported. - DataExport *StorageClassAnalysisDataExport - - noSmithyDocumentSerde -} - -// Container for data related to the storage class analysis for an Amazon S3 -// bucket for export. -type StorageClassAnalysisDataExport struct { - - // The place to store the data for an analysis. - // - // This member is required. - Destination *AnalyticsExportDestination - - // The version of the output schema to use when exporting data. Must be V_1 . - // - // This member is required. - OutputSchemaVersion StorageClassAnalysisSchemaVersion - - noSmithyDocumentSerde -} - -// A container of a key value name pair. -type Tag struct { - - // Name of the object key. - // - // This member is required. - Key *string - - // Value of the tag. - // - // This member is required. - Value *string - - noSmithyDocumentSerde -} - -// Container for TagSet elements. -type Tagging struct { - - // A collection for a set of tags - // - // This member is required. - TagSet []Tag - - noSmithyDocumentSerde -} - -// Container for granting information. -// -// Buckets that use the bucket owner enforced setting for Object Ownership don't -// support target grants. For more information, see [Permissions server access log delivery]in the Amazon S3 User Guide. -// -// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general -type TargetGrant struct { - - // Container for the person being granted permissions. - Grantee *Grantee - - // Logging permissions assigned to the grantee for the bucket. - Permission BucketLogsPermission - - noSmithyDocumentSerde -} - -// Amazon S3 key format for log objects. Only one format, PartitionedPrefix or -// SimplePrefix, is allowed. -type TargetObjectKeyFormat struct { - - // Partitioned S3 key for log objects. - PartitionedPrefix *PartitionedPrefix - - // To use the simple format for S3 keys for log objects. To specify SimplePrefix - // format, set SimplePrefix to {}. - SimplePrefix *SimplePrefix - - noSmithyDocumentSerde -} - -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without additional operational overhead. -type Tiering struct { - - // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3 - // Intelligent-Tiering storage class. - // - // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access - // - // This member is required. - AccessTier IntelligentTieringAccessTier - - // The number of consecutive days of no access after which an object will be - // eligible to be transitioned to the corresponding tier. The minimum number of - // days specified for Archive Access tier must be at least 90 days and Deep Archive - // Access tier must be at least 180 days. The maximum can be up to 2 years (730 - // days). - // - // This member is required. - Days *int32 - - noSmithyDocumentSerde -} - -// A container for specifying the configuration for publication of messages to an -// Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects -// specified events. -type TopicConfiguration struct { - - // The Amazon S3 bucket event about which to send notifications. For more - // information, see [Supported Event Types]in the Amazon S3 User Guide. - // - // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - // - // This member is required. - Events []Event - - // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // This member is required. - TopicArn *string - - // Specifies object key name filtering rules. For information about key name - // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. - // - // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html - Filter *NotificationConfigurationFilter - - // An optional unique identifier for configurations in a notification - // configuration. If you don't provide one, Amazon S3 will assign an ID. - Id *string - - noSmithyDocumentSerde -} - -// Specifies when an object transitions to a specified storage class. For more -// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle]in the Amazon S3 -// User Guide. -// -// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html -type Transition struct { - - // Indicates when objects are transitioned to the specified storage class. The - // date value must be in ISO 8601 format. The time is always midnight UTC. - Date *time.Time - - // Indicates the number of days after creation when objects are transitioned to - // the specified storage class. If the specified storage class is - // INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE , valid values are - // 0 or positive integers. If the specified storage class is STANDARD_IA or - // ONEZONE_IA , valid values are positive integers greater than 30 . Be aware that - // some storage classes have a minimum storage duration and that you're charged for - // transitioning objects before their minimum storage duration. For more - // information, see [Constraints and considerations for transitions]in the Amazon S3 User Guide. - // - // [Constraints and considerations for transitions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html#lifecycle-configuration-constraints - Days *int32 - - // The storage class to which you want the object to transition. - StorageClass TransitionStorageClass - - noSmithyDocumentSerde -} - -// Describes the versioning state of an Amazon S3 bucket. For more information, -// see [PUT Bucket versioning]in the Amazon S3 API Reference. -// -// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html -type VersioningConfiguration struct { - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA delete. - // If the bucket has never been so configured, this element is not returned. - MFADelete MFADelete - - // The versioning state of the bucket. - Status BucketVersioningStatus - - noSmithyDocumentSerde -} - -// Specifies website configuration parameters for an Amazon S3 bucket. -type WebsiteConfiguration struct { - - // The name of the error document for the website. - ErrorDocument *ErrorDocument - - // The name of the index document for the website. - IndexDocument *IndexDocument - - // The redirect behavior for every request to this bucket's website endpoint. - // - // If you specify this property, you can't specify any other property. - RedirectAllRequestsTo *RedirectAllRequestsTo - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []RoutingRule - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -// UnknownUnionMember is returned when a union member is returned over the wire, -// but has an unknown tag. -type UnknownUnionMember struct { - Tag string - Value []byte - - noSmithyDocumentSerde -} - -func (*UnknownUnionMember) isAnalyticsFilter() {} -func (*UnknownUnionMember) isMetricsFilter() {} -func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go deleted file mode 100644 index 0e664c59ce09..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go +++ /dev/null @@ -1,23 +0,0 @@ -package s3 - -// This contains helper methods to set resolver URI into the context object. If they are ever used for -// something other than S3, they should be moved to internal/context/context.go - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -type s3resolvedURI struct{} - -// setS3ResolvedURI sets the URI as resolved by the EndpointResolverV2 -func setS3ResolvedURI(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, s3resolvedURI{}, value) -} - -// getS3ResolvedURI gets the URI as resolved by EndpointResolverV2 -func getS3ResolvedURI(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, s3resolvedURI{}).(string) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go deleted file mode 100644 index 8c1ca8ee32d8..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go +++ /dev/null @@ -1,6122 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpAbortMultipartUpload struct { -} - -func (*validateOpAbortMultipartUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAbortMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AbortMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAbortMultipartUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCompleteMultipartUpload struct { -} - -func (*validateOpCompleteMultipartUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCompleteMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CompleteMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCompleteMultipartUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCopyObject struct { -} - -func (*validateOpCopyObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCopyObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CopyObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCopyObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateBucket struct { -} - -func (*validateOpCreateBucket) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateBucketInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateBucketInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateBucketMetadataConfiguration struct { -} - -func (*validateOpCreateBucketMetadataConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateBucketMetadataConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateBucketMetadataConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateBucketMetadataConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateBucketMetadataTableConfiguration struct { -} - -func (*validateOpCreateBucketMetadataTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateBucketMetadataTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateMultipartUpload struct { -} - -func (*validateOpCreateMultipartUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateMultipartUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateSession struct { -} - -func (*validateOpCreateSession) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateSessionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateSessionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketAnalyticsConfiguration struct { -} - -func (*validateOpDeleteBucketAnalyticsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketAnalyticsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketCors struct { -} - -func (*validateOpDeleteBucketCors) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketCorsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketCorsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketEncryption struct { -} - -func (*validateOpDeleteBucketEncryption) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketEncryptionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketEncryptionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucket struct { -} - -func (*validateOpDeleteBucket) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketIntelligentTieringConfiguration struct { -} - -func (*validateOpDeleteBucketIntelligentTieringConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketIntelligentTieringConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketInventoryConfiguration struct { -} - -func (*validateOpDeleteBucketInventoryConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketInventoryConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketLifecycle struct { -} - -func (*validateOpDeleteBucketLifecycle) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketLifecycleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketLifecycleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketMetadataConfiguration struct { -} - -func (*validateOpDeleteBucketMetadataConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketMetadataConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketMetadataConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketMetadataConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketMetadataTableConfiguration struct { -} - -func (*validateOpDeleteBucketMetadataTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketMetadataTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketMetricsConfiguration struct { -} - -func (*validateOpDeleteBucketMetricsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketMetricsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketOwnershipControls struct { -} - -func (*validateOpDeleteBucketOwnershipControls) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketOwnershipControlsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketPolicy struct { -} - -func (*validateOpDeleteBucketPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketReplication struct { -} - -func (*validateOpDeleteBucketReplication) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketReplicationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketReplicationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketTagging struct { -} - -func (*validateOpDeleteBucketTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketWebsite struct { -} - -func (*validateOpDeleteBucketWebsite) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketWebsiteInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketWebsiteInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteObject struct { -} - -func (*validateOpDeleteObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteObjects struct { -} - -func (*validateOpDeleteObjects) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteObjectsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteObjectsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteObjectTagging struct { -} - -func (*validateOpDeleteObjectTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteObjectTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteObjectTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeletePublicAccessBlock struct { -} - -func (*validateOpDeletePublicAccessBlock) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeletePublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeletePublicAccessBlockInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeletePublicAccessBlockInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketAccelerateConfiguration struct { -} - -func (*validateOpGetBucketAccelerateConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketAccelerateConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketAcl struct { -} - -func (*validateOpGetBucketAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketAnalyticsConfiguration struct { -} - -func (*validateOpGetBucketAnalyticsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketAnalyticsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketCors struct { -} - -func (*validateOpGetBucketCors) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketCorsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketCorsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketEncryption struct { -} - -func (*validateOpGetBucketEncryption) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketEncryptionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketEncryptionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketIntelligentTieringConfiguration struct { -} - -func (*validateOpGetBucketIntelligentTieringConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketIntelligentTieringConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketInventoryConfiguration struct { -} - -func (*validateOpGetBucketInventoryConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketInventoryConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketLifecycleConfiguration struct { -} - -func (*validateOpGetBucketLifecycleConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketLifecycleConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketLocation struct { -} - -func (*validateOpGetBucketLocation) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketLocation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketLocationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketLocationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketLogging struct { -} - -func (*validateOpGetBucketLogging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketLoggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketLoggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketMetadataConfiguration struct { -} - -func (*validateOpGetBucketMetadataConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketMetadataConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketMetadataConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketMetadataConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketMetadataTableConfiguration struct { -} - -func (*validateOpGetBucketMetadataTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketMetadataTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketMetricsConfiguration struct { -} - -func (*validateOpGetBucketMetricsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketMetricsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketNotificationConfiguration struct { -} - -func (*validateOpGetBucketNotificationConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketNotificationConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketOwnershipControls struct { -} - -func (*validateOpGetBucketOwnershipControls) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketOwnershipControlsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketPolicy struct { -} - -func (*validateOpGetBucketPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketPolicyStatus struct { -} - -func (*validateOpGetBucketPolicyStatus) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketPolicyStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketPolicyStatusInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketPolicyStatusInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketReplication struct { -} - -func (*validateOpGetBucketReplication) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketReplicationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketReplicationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketRequestPayment struct { -} - -func (*validateOpGetBucketRequestPayment) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketRequestPaymentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketRequestPaymentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketTagging struct { -} - -func (*validateOpGetBucketTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketVersioning struct { -} - -func (*validateOpGetBucketVersioning) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketVersioningInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketVersioningInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketWebsite struct { -} - -func (*validateOpGetBucketWebsite) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketWebsiteInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketWebsiteInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectAcl struct { -} - -func (*validateOpGetObjectAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectAttributes struct { -} - -func (*validateOpGetObjectAttributes) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectAttributesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectAttributesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObject struct { -} - -func (*validateOpGetObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectLegalHold struct { -} - -func (*validateOpGetObjectLegalHold) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectLegalHoldInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectLegalHoldInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectLockConfiguration struct { -} - -func (*validateOpGetObjectLockConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectLockConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectLockConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectRetention struct { -} - -func (*validateOpGetObjectRetention) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectRetentionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectRetentionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectTagging struct { -} - -func (*validateOpGetObjectTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectTorrent struct { -} - -func (*validateOpGetObjectTorrent) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectTorrent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectTorrentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectTorrentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetPublicAccessBlock struct { -} - -func (*validateOpGetPublicAccessBlock) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetPublicAccessBlockInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetPublicAccessBlockInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpHeadBucket struct { -} - -func (*validateOpHeadBucket) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpHeadBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*HeadBucketInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpHeadBucketInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpHeadObject struct { -} - -func (*validateOpHeadObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpHeadObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*HeadObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpHeadObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketAnalyticsConfigurations struct { -} - -func (*validateOpListBucketAnalyticsConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketAnalyticsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListBucketAnalyticsConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketIntelligentTieringConfigurations struct { -} - -func (*validateOpListBucketIntelligentTieringConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketIntelligentTieringConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListBucketIntelligentTieringConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketInventoryConfigurations struct { -} - -func (*validateOpListBucketInventoryConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketInventoryConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListBucketInventoryConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketMetricsConfigurations struct { -} - -func (*validateOpListBucketMetricsConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketMetricsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListBucketMetricsConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListMultipartUploads struct { -} - -func (*validateOpListMultipartUploads) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListMultipartUploads) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListMultipartUploadsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListMultipartUploadsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListObjects struct { -} - -func (*validateOpListObjects) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListObjectsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListObjectsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListObjectsV2 struct { -} - -func (*validateOpListObjectsV2) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListObjectsV2) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListObjectsV2Input) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListObjectsV2Input(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListObjectVersions struct { -} - -func (*validateOpListObjectVersions) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListObjectVersions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListObjectVersionsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListObjectVersionsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListParts struct { -} - -func (*validateOpListParts) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListParts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListPartsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListPartsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketAccelerateConfiguration struct { -} - -func (*validateOpPutBucketAccelerateConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketAccelerateConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketAcl struct { -} - -func (*validateOpPutBucketAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketAnalyticsConfiguration struct { -} - -func (*validateOpPutBucketAnalyticsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketAnalyticsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketCors struct { -} - -func (*validateOpPutBucketCors) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketCorsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketCorsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketEncryption struct { -} - -func (*validateOpPutBucketEncryption) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketEncryptionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketEncryptionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketIntelligentTieringConfiguration struct { -} - -func (*validateOpPutBucketIntelligentTieringConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketIntelligentTieringConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketInventoryConfiguration struct { -} - -func (*validateOpPutBucketInventoryConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketInventoryConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketLifecycleConfiguration struct { -} - -func (*validateOpPutBucketLifecycleConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketLifecycleConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketLogging struct { -} - -func (*validateOpPutBucketLogging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketLoggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketLoggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketMetricsConfiguration struct { -} - -func (*validateOpPutBucketMetricsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketMetricsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketNotificationConfiguration struct { -} - -func (*validateOpPutBucketNotificationConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketNotificationConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketOwnershipControls struct { -} - -func (*validateOpPutBucketOwnershipControls) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketOwnershipControlsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketPolicy struct { -} - -func (*validateOpPutBucketPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketReplication struct { -} - -func (*validateOpPutBucketReplication) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketReplicationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketReplicationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketRequestPayment struct { -} - -func (*validateOpPutBucketRequestPayment) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketRequestPaymentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketRequestPaymentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketTagging struct { -} - -func (*validateOpPutBucketTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketVersioning struct { -} - -func (*validateOpPutBucketVersioning) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketVersioningInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketVersioningInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketWebsite struct { -} - -func (*validateOpPutBucketWebsite) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketWebsiteInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketWebsiteInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectAcl struct { -} - -func (*validateOpPutObjectAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObject struct { -} - -func (*validateOpPutObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectLegalHold struct { -} - -func (*validateOpPutObjectLegalHold) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectLegalHoldInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectLegalHoldInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectLockConfiguration struct { -} - -func (*validateOpPutObjectLockConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectLockConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectLockConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectRetention struct { -} - -func (*validateOpPutObjectRetention) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectRetentionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectRetentionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectTagging struct { -} - -func (*validateOpPutObjectTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutPublicAccessBlock struct { -} - -func (*validateOpPutPublicAccessBlock) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutPublicAccessBlockInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutPublicAccessBlockInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpRenameObject struct { -} - -func (*validateOpRenameObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpRenameObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*RenameObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpRenameObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpRestoreObject struct { -} - -func (*validateOpRestoreObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpRestoreObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*RestoreObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpRestoreObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpSelectObjectContent struct { -} - -func (*validateOpSelectObjectContent) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpSelectObjectContent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*SelectObjectContentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpSelectObjectContentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUpdateBucketMetadataInventoryTableConfiguration struct { -} - -func (*validateOpUpdateBucketMetadataInventoryTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUpdateBucketMetadataInventoryTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UpdateBucketMetadataInventoryTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUpdateBucketMetadataInventoryTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUpdateBucketMetadataJournalTableConfiguration struct { -} - -func (*validateOpUpdateBucketMetadataJournalTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUpdateBucketMetadataJournalTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UpdateBucketMetadataJournalTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUpdateBucketMetadataJournalTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUploadPartCopy struct { -} - -func (*validateOpUploadPartCopy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUploadPartCopy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UploadPartCopyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUploadPartCopyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUploadPart struct { -} - -func (*validateOpUploadPart) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUploadPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UploadPartInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUploadPartInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpWriteGetObjectResponse struct { -} - -func (*validateOpWriteGetObjectResponse) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpWriteGetObjectResponse) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*WriteGetObjectResponseInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpWriteGetObjectResponseInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After) -} - -func addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After) -} - -func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After) -} - -func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) -} - -func addOpCreateBucketMetadataConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateBucketMetadataConfiguration{}, middleware.After) -} - -func addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateBucketMetadataTableConfiguration{}, middleware.After) -} - -func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) -} - -func addOpCreateSessionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateSession{}, middleware.After) -} - -func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After) -} - -func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After) -} - -func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After) -} - -func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After) -} - -func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) -} - -func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After) -} - -func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After) -} - -func addOpDeleteBucketMetadataConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketMetadataConfiguration{}, middleware.After) -} - -func addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketMetadataTableConfiguration{}, middleware.After) -} - -func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After) -} - -func addOpDeleteBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketOwnershipControls{}, middleware.After) -} - -func addOpDeleteBucketPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketPolicy{}, middleware.After) -} - -func addOpDeleteBucketReplicationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketReplication{}, middleware.After) -} - -func addOpDeleteBucketTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketTagging{}, middleware.After) -} - -func addOpDeleteBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketWebsite{}, middleware.After) -} - -func addOpDeleteObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteObject{}, middleware.After) -} - -func addOpDeleteObjectsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteObjects{}, middleware.After) -} - -func addOpDeleteObjectTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteObjectTagging{}, middleware.After) -} - -func addOpDeletePublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeletePublicAccessBlock{}, middleware.After) -} - -func addOpGetBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketAccelerateConfiguration{}, middleware.After) -} - -func addOpGetBucketAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketAcl{}, middleware.After) -} - -func addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketAnalyticsConfiguration{}, middleware.After) -} - -func addOpGetBucketCorsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketCors{}, middleware.After) -} - -func addOpGetBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketEncryption{}, middleware.After) -} - -func addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketIntelligentTieringConfiguration{}, middleware.After) -} - -func addOpGetBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketInventoryConfiguration{}, middleware.After) -} - -func addOpGetBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketLifecycleConfiguration{}, middleware.After) -} - -func addOpGetBucketLocationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketLocation{}, middleware.After) -} - -func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After) -} - -func addOpGetBucketMetadataConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketMetadataConfiguration{}, middleware.After) -} - -func addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketMetadataTableConfiguration{}, middleware.After) -} - -func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After) -} - -func addOpGetBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketNotificationConfiguration{}, middleware.After) -} - -func addOpGetBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, middleware.After) -} - -func addOpGetBucketPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketPolicy{}, middleware.After) -} - -func addOpGetBucketPolicyStatusValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketPolicyStatus{}, middleware.After) -} - -func addOpGetBucketReplicationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketReplication{}, middleware.After) -} - -func addOpGetBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketRequestPayment{}, middleware.After) -} - -func addOpGetBucketTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketTagging{}, middleware.After) -} - -func addOpGetBucketVersioningValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketVersioning{}, middleware.After) -} - -func addOpGetBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketWebsite{}, middleware.After) -} - -func addOpGetObjectAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectAcl{}, middleware.After) -} - -func addOpGetObjectAttributesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectAttributes{}, middleware.After) -} - -func addOpGetObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObject{}, middleware.After) -} - -func addOpGetObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectLegalHold{}, middleware.After) -} - -func addOpGetObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectLockConfiguration{}, middleware.After) -} - -func addOpGetObjectRetentionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectRetention{}, middleware.After) -} - -func addOpGetObjectTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectTagging{}, middleware.After) -} - -func addOpGetObjectTorrentValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectTorrent{}, middleware.After) -} - -func addOpGetPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetPublicAccessBlock{}, middleware.After) -} - -func addOpHeadBucketValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpHeadBucket{}, middleware.After) -} - -func addOpHeadObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpHeadObject{}, middleware.After) -} - -func addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListBucketAnalyticsConfigurations{}, middleware.After) -} - -func addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListBucketIntelligentTieringConfigurations{}, middleware.After) -} - -func addOpListBucketInventoryConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListBucketInventoryConfigurations{}, middleware.After) -} - -func addOpListBucketMetricsConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListBucketMetricsConfigurations{}, middleware.After) -} - -func addOpListMultipartUploadsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListMultipartUploads{}, middleware.After) -} - -func addOpListObjectsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListObjects{}, middleware.After) -} - -func addOpListObjectsV2ValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListObjectsV2{}, middleware.After) -} - -func addOpListObjectVersionsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListObjectVersions{}, middleware.After) -} - -func addOpListPartsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListParts{}, middleware.After) -} - -func addOpPutBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketAccelerateConfiguration{}, middleware.After) -} - -func addOpPutBucketAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketAcl{}, middleware.After) -} - -func addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketAnalyticsConfiguration{}, middleware.After) -} - -func addOpPutBucketCorsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketCors{}, middleware.After) -} - -func addOpPutBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketEncryption{}, middleware.After) -} - -func addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketIntelligentTieringConfiguration{}, middleware.After) -} - -func addOpPutBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketInventoryConfiguration{}, middleware.After) -} - -func addOpPutBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketLifecycleConfiguration{}, middleware.After) -} - -func addOpPutBucketLoggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketLogging{}, middleware.After) -} - -func addOpPutBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketMetricsConfiguration{}, middleware.After) -} - -func addOpPutBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketNotificationConfiguration{}, middleware.After) -} - -func addOpPutBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketOwnershipControls{}, middleware.After) -} - -func addOpPutBucketPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketPolicy{}, middleware.After) -} - -func addOpPutBucketReplicationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketReplication{}, middleware.After) -} - -func addOpPutBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketRequestPayment{}, middleware.After) -} - -func addOpPutBucketTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketTagging{}, middleware.After) -} - -func addOpPutBucketVersioningValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketVersioning{}, middleware.After) -} - -func addOpPutBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketWebsite{}, middleware.After) -} - -func addOpPutObjectAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectAcl{}, middleware.After) -} - -func addOpPutObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObject{}, middleware.After) -} - -func addOpPutObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectLegalHold{}, middleware.After) -} - -func addOpPutObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectLockConfiguration{}, middleware.After) -} - -func addOpPutObjectRetentionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectRetention{}, middleware.After) -} - -func addOpPutObjectTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectTagging{}, middleware.After) -} - -func addOpPutPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutPublicAccessBlock{}, middleware.After) -} - -func addOpRenameObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpRenameObject{}, middleware.After) -} - -func addOpRestoreObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpRestoreObject{}, middleware.After) -} - -func addOpSelectObjectContentValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpSelectObjectContent{}, middleware.After) -} - -func addOpUpdateBucketMetadataInventoryTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUpdateBucketMetadataInventoryTableConfiguration{}, middleware.After) -} - -func addOpUpdateBucketMetadataJournalTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUpdateBucketMetadataJournalTableConfiguration{}, middleware.After) -} - -func addOpUploadPartCopyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUploadPartCopy{}, middleware.After) -} - -func addOpUploadPartValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUploadPart{}, middleware.After) -} - -func addOpWriteGetObjectResponseValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpWriteGetObjectResponse{}, middleware.After) -} - -func validateAccessControlPolicy(v *types.AccessControlPolicy) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AccessControlPolicy"} - if v.Grants != nil { - if err := validateGrants(v.Grants); err != nil { - invalidParams.AddNested("Grants", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAccessControlTranslation(v *types.AccessControlTranslation) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AccessControlTranslation"} - if len(v.Owner) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Owner")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsAndOperator(v *types.AnalyticsAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsConfiguration(v *types.AnalyticsConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsConfiguration"} - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.Filter != nil { - if err := validateAnalyticsFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if v.StorageClassAnalysis == nil { - invalidParams.Add(smithy.NewErrParamRequired("StorageClassAnalysis")) - } else if v.StorageClassAnalysis != nil { - if err := validateStorageClassAnalysis(v.StorageClassAnalysis); err != nil { - invalidParams.AddNested("StorageClassAnalysis", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsExportDestination(v *types.AnalyticsExportDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsExportDestination"} - if v.S3BucketDestination == nil { - invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) - } else if v.S3BucketDestination != nil { - if err := validateAnalyticsS3BucketDestination(v.S3BucketDestination); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsFilter(v types.AnalyticsFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsFilter"} - switch uv := v.(type) { - case *types.AnalyticsFilterMemberAnd: - if err := validateAnalyticsAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) - } - - case *types.AnalyticsFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) - } - - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsS3BucketDestination"} - if len(v.Format) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Format")) - } - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BucketLifecycleConfiguration"} - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateLifecycleRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateBucketLoggingStatus(v *types.BucketLoggingStatus) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BucketLoggingStatus"} - if v.LoggingEnabled != nil { - if err := validateLoggingEnabled(v.LoggingEnabled); err != nil { - invalidParams.AddNested("LoggingEnabled", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateCORSConfiguration(v *types.CORSConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CORSConfiguration"} - if v.CORSRules == nil { - invalidParams.Add(smithy.NewErrParamRequired("CORSRules")) - } else if v.CORSRules != nil { - if err := validateCORSRules(v.CORSRules); err != nil { - invalidParams.AddNested("CORSRules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateCORSRule(v *types.CORSRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CORSRule"} - if v.AllowedMethods == nil { - invalidParams.Add(smithy.NewErrParamRequired("AllowedMethods")) - } - if v.AllowedOrigins == nil { - invalidParams.Add(smithy.NewErrParamRequired("AllowedOrigins")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateCORSRules(v []types.CORSRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CORSRules"} - for i := range v { - if err := validateCORSRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateCreateBucketConfiguration(v *types.CreateBucketConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateBucketConfiguration"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateDelete(v *types.Delete) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Delete"} - if v.Objects == nil { - invalidParams.Add(smithy.NewErrParamRequired("Objects")) - } else if v.Objects != nil { - if err := validateObjectIdentifierList(v.Objects); err != nil { - invalidParams.AddNested("Objects", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateDestination(v *types.Destination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Destination"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.AccessControlTranslation != nil { - if err := validateAccessControlTranslation(v.AccessControlTranslation); err != nil { - invalidParams.AddNested("AccessControlTranslation", err.(smithy.InvalidParamsError)) - } - } - if v.ReplicationTime != nil { - if err := validateReplicationTime(v.ReplicationTime); err != nil { - invalidParams.AddNested("ReplicationTime", err.(smithy.InvalidParamsError)) - } - } - if v.Metrics != nil { - if err := validateMetrics(v.Metrics); err != nil { - invalidParams.AddNested("Metrics", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateEncryption(v *types.Encryption) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Encryption"} - if len(v.EncryptionType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("EncryptionType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateErrorDocument(v *types.ErrorDocument) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ErrorDocument"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateExistingObjectReplication(v *types.ExistingObjectReplication) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ExistingObjectReplication"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGlacierJobParameters(v *types.GlacierJobParameters) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GlacierJobParameters"} - if len(v.Tier) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Tier")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGrant(v *types.Grant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Grant"} - if v.Grantee != nil { - if err := validateGrantee(v.Grantee); err != nil { - invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGrantee(v *types.Grantee) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Grantee"} - if len(v.Type) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Type")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGrants(v []types.Grant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Grants"} - for i := range v { - if err := validateGrant(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateIndexDocument(v *types.IndexDocument) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IndexDocument"} - if v.Suffix == nil { - invalidParams.Add(smithy.NewErrParamRequired("Suffix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringConfiguration"} - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.Filter != nil { - if err := validateIntelligentTieringFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if v.Tierings == nil { - invalidParams.Add(smithy.NewErrParamRequired("Tierings")) - } else if v.Tierings != nil { - if err := validateTieringList(v.Tierings); err != nil { - invalidParams.AddNested("Tierings", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateIntelligentTieringFilter(v *types.IntelligentTieringFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringFilter"} - if v.Tag != nil { - if err := validateTag(v.Tag); err != nil { - invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) - } - } - if v.And != nil { - if err := validateIntelligentTieringAndOperator(v.And); err != nil { - invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryConfiguration(v *types.InventoryConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryConfiguration"} - if v.Destination == nil { - invalidParams.Add(smithy.NewErrParamRequired("Destination")) - } else if v.Destination != nil { - if err := validateInventoryDestination(v.Destination); err != nil { - invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) - } - } - if v.IsEnabled == nil { - invalidParams.Add(smithy.NewErrParamRequired("IsEnabled")) - } - if v.Filter != nil { - if err := validateInventoryFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if len(v.IncludedObjectVersions) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("IncludedObjectVersions")) - } - if v.Schedule == nil { - invalidParams.Add(smithy.NewErrParamRequired("Schedule")) - } else if v.Schedule != nil { - if err := validateInventorySchedule(v.Schedule); err != nil { - invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryDestination(v *types.InventoryDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryDestination"} - if v.S3BucketDestination == nil { - invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) - } else if v.S3BucketDestination != nil { - if err := validateInventoryS3BucketDestination(v.S3BucketDestination); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryEncryption(v *types.InventoryEncryption) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryEncryption"} - if v.SSEKMS != nil { - if err := validateSSEKMS(v.SSEKMS); err != nil { - invalidParams.AddNested("SSEKMS", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryFilter(v *types.InventoryFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryFilter"} - if v.Prefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("Prefix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryS3BucketDestination(v *types.InventoryS3BucketDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryS3BucketDestination"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if len(v.Format) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Format")) - } - if v.Encryption != nil { - if err := validateInventoryEncryption(v.Encryption); err != nil { - invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventorySchedule(v *types.InventorySchedule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventorySchedule"} - if len(v.Frequency) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Frequency")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryTableConfiguration(v *types.InventoryTableConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryTableConfiguration"} - if len(v.ConfigurationState) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ConfigurationState")) - } - if v.EncryptionConfiguration != nil { - if err := validateMetadataTableEncryptionConfiguration(v.EncryptionConfiguration); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryTableConfigurationUpdates(v *types.InventoryTableConfigurationUpdates) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryTableConfigurationUpdates"} - if len(v.ConfigurationState) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ConfigurationState")) - } - if v.EncryptionConfiguration != nil { - if err := validateMetadataTableEncryptionConfiguration(v.EncryptionConfiguration); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateJournalTableConfiguration(v *types.JournalTableConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "JournalTableConfiguration"} - if v.RecordExpiration == nil { - invalidParams.Add(smithy.NewErrParamRequired("RecordExpiration")) - } else if v.RecordExpiration != nil { - if err := validateRecordExpiration(v.RecordExpiration); err != nil { - invalidParams.AddNested("RecordExpiration", err.(smithy.InvalidParamsError)) - } - } - if v.EncryptionConfiguration != nil { - if err := validateMetadataTableEncryptionConfiguration(v.EncryptionConfiguration); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateJournalTableConfigurationUpdates(v *types.JournalTableConfigurationUpdates) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "JournalTableConfigurationUpdates"} - if v.RecordExpiration == nil { - invalidParams.Add(smithy.NewErrParamRequired("RecordExpiration")) - } else if v.RecordExpiration != nil { - if err := validateRecordExpiration(v.RecordExpiration); err != nil { - invalidParams.AddNested("RecordExpiration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfiguration"} - if v.LambdaFunctionArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("LambdaFunctionArn")) - } - if v.Events == nil { - invalidParams.Add(smithy.NewErrParamRequired("Events")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfigurationList"} - for i := range v { - if err := validateLambdaFunctionConfiguration(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRule(v *types.LifecycleRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRule"} - if v.Filter != nil { - if err := validateLifecycleRuleFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRuleFilter(v *types.LifecycleRuleFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"} - if v.Tag != nil { - if err := validateTag(v.Tag); err != nil { - invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) - } - } - if v.And != nil { - if err := validateLifecycleRuleAndOperator(v.And); err != nil { - invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRules(v []types.LifecycleRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRules"} - for i := range v { - if err := validateLifecycleRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLoggingEnabled(v *types.LoggingEnabled) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LoggingEnabled"} - if v.TargetBucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("TargetBucket")) - } - if v.TargetGrants != nil { - if err := validateTargetGrants(v.TargetGrants); err != nil { - invalidParams.AddNested("TargetGrants", err.(smithy.InvalidParamsError)) - } - } - if v.TargetPrefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("TargetPrefix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetadataConfiguration(v *types.MetadataConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetadataConfiguration"} - if v.JournalTableConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("JournalTableConfiguration")) - } else if v.JournalTableConfiguration != nil { - if err := validateJournalTableConfiguration(v.JournalTableConfiguration); err != nil { - invalidParams.AddNested("JournalTableConfiguration", err.(smithy.InvalidParamsError)) - } - } - if v.InventoryTableConfiguration != nil { - if err := validateInventoryTableConfiguration(v.InventoryTableConfiguration); err != nil { - invalidParams.AddNested("InventoryTableConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetadataTableConfiguration(v *types.MetadataTableConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetadataTableConfiguration"} - if v.S3TablesDestination == nil { - invalidParams.Add(smithy.NewErrParamRequired("S3TablesDestination")) - } else if v.S3TablesDestination != nil { - if err := validateS3TablesDestination(v.S3TablesDestination); err != nil { - invalidParams.AddNested("S3TablesDestination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetadataTableEncryptionConfiguration(v *types.MetadataTableEncryptionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetadataTableEncryptionConfiguration"} - if len(v.SseAlgorithm) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("SseAlgorithm")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetrics(v *types.Metrics) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Metrics"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetricsAndOperator(v *types.MetricsAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetricsAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetricsConfiguration(v *types.MetricsConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetricsConfiguration"} - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.Filter != nil { - if err := validateMetricsFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetricsFilter(v types.MetricsFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetricsFilter"} - switch uv := v.(type) { - case *types.MetricsFilterMemberAnd: - if err := validateMetricsAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) - } - - case *types.MetricsFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) - } - - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateNotificationConfiguration(v *types.NotificationConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "NotificationConfiguration"} - if v.TopicConfigurations != nil { - if err := validateTopicConfigurationList(v.TopicConfigurations); err != nil { - invalidParams.AddNested("TopicConfigurations", err.(smithy.InvalidParamsError)) - } - } - if v.QueueConfigurations != nil { - if err := validateQueueConfigurationList(v.QueueConfigurations); err != nil { - invalidParams.AddNested("QueueConfigurations", err.(smithy.InvalidParamsError)) - } - } - if v.LambdaFunctionConfigurations != nil { - if err := validateLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations); err != nil { - invalidParams.AddNested("LambdaFunctionConfigurations", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateObjectIdentifier(v *types.ObjectIdentifier) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifier"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateObjectIdentifierList(v []types.ObjectIdentifier) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifierList"} - for i := range v { - if err := validateObjectIdentifier(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOutputLocation(v *types.OutputLocation) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OutputLocation"} - if v.S3 != nil { - if err := validateS3Location(v.S3); err != nil { - invalidParams.AddNested("S3", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOwnershipControls(v *types.OwnershipControls) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OwnershipControls"} - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateOwnershipControlsRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOwnershipControlsRule(v *types.OwnershipControlsRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRule"} - if len(v.ObjectOwnership) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ObjectOwnership")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOwnershipControlsRules(v []types.OwnershipControlsRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRules"} - for i := range v { - if err := validateOwnershipControlsRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateQueueConfiguration(v *types.QueueConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "QueueConfiguration"} - if v.QueueArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("QueueArn")) - } - if v.Events == nil { - invalidParams.Add(smithy.NewErrParamRequired("Events")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateQueueConfigurationList(v []types.QueueConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "QueueConfigurationList"} - for i := range v { - if err := validateQueueConfiguration(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRecordExpiration(v *types.RecordExpiration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RecordExpiration"} - if len(v.Expiration) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Expiration")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRedirectAllRequestsTo(v *types.RedirectAllRequestsTo) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RedirectAllRequestsTo"} - if v.HostName == nil { - invalidParams.Add(smithy.NewErrParamRequired("HostName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicaModifications(v *types.ReplicaModifications) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicaModifications"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationConfiguration(v *types.ReplicationConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"} - if v.Role == nil { - invalidParams.Add(smithy.NewErrParamRequired("Role")) - } - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateReplicationRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRule(v *types.ReplicationRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"} - if v.Filter != nil { - if err := validateReplicationRuleFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if v.SourceSelectionCriteria != nil { - if err := validateSourceSelectionCriteria(v.SourceSelectionCriteria); err != nil { - invalidParams.AddNested("SourceSelectionCriteria", err.(smithy.InvalidParamsError)) - } - } - if v.ExistingObjectReplication != nil { - if err := validateExistingObjectReplication(v.ExistingObjectReplication); err != nil { - invalidParams.AddNested("ExistingObjectReplication", err.(smithy.InvalidParamsError)) - } - } - if v.Destination == nil { - invalidParams.Add(smithy.NewErrParamRequired("Destination")) - } else if v.Destination != nil { - if err := validateDestination(v.Destination); err != nil { - invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRuleFilter(v *types.ReplicationRuleFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"} - if v.Tag != nil { - if err := validateTag(v.Tag); err != nil { - invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) - } - } - if v.And != nil { - if err := validateReplicationRuleAndOperator(v.And); err != nil { - invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRules(v []types.ReplicationRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRules"} - for i := range v { - if err := validateReplicationRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationTime(v *types.ReplicationTime) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationTime"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if v.Time == nil { - invalidParams.Add(smithy.NewErrParamRequired("Time")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRequestPaymentConfiguration(v *types.RequestPaymentConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RequestPaymentConfiguration"} - if len(v.Payer) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Payer")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRestoreRequest(v *types.RestoreRequest) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RestoreRequest"} - if v.GlacierJobParameters != nil { - if err := validateGlacierJobParameters(v.GlacierJobParameters); err != nil { - invalidParams.AddNested("GlacierJobParameters", err.(smithy.InvalidParamsError)) - } - } - if v.SelectParameters != nil { - if err := validateSelectParameters(v.SelectParameters); err != nil { - invalidParams.AddNested("SelectParameters", err.(smithy.InvalidParamsError)) - } - } - if v.OutputLocation != nil { - if err := validateOutputLocation(v.OutputLocation); err != nil { - invalidParams.AddNested("OutputLocation", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRoutingRule(v *types.RoutingRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RoutingRule"} - if v.Redirect == nil { - invalidParams.Add(smithy.NewErrParamRequired("Redirect")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRoutingRules(v []types.RoutingRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RoutingRules"} - for i := range v { - if err := validateRoutingRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateS3Location(v *types.S3Location) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "S3Location"} - if v.BucketName == nil { - invalidParams.Add(smithy.NewErrParamRequired("BucketName")) - } - if v.Prefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("Prefix")) - } - if v.Encryption != nil { - if err := validateEncryption(v.Encryption); err != nil { - invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) - } - } - if v.AccessControlList != nil { - if err := validateGrants(v.AccessControlList); err != nil { - invalidParams.AddNested("AccessControlList", err.(smithy.InvalidParamsError)) - } - } - if v.Tagging != nil { - if err := validateTagging(v.Tagging); err != nil { - invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateS3TablesDestination(v *types.S3TablesDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "S3TablesDestination"} - if v.TableBucketArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("TableBucketArn")) - } - if v.TableName == nil { - invalidParams.Add(smithy.NewErrParamRequired("TableName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSelectParameters(v *types.SelectParameters) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SelectParameters"} - if v.InputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) - } - if len(v.ExpressionType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) - } - if v.Expression == nil { - invalidParams.Add(smithy.NewErrParamRequired("Expression")) - } - if v.OutputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionByDefault"} - if len(v.SSEAlgorithm) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("SSEAlgorithm")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionConfiguration"} - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateServerSideEncryptionRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionRule(v *types.ServerSideEncryptionRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRule"} - if v.ApplyServerSideEncryptionByDefault != nil { - if err := validateServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault); err != nil { - invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionRules(v []types.ServerSideEncryptionRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRules"} - for i := range v { - if err := validateServerSideEncryptionRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSourceSelectionCriteria(v *types.SourceSelectionCriteria) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SourceSelectionCriteria"} - if v.SseKmsEncryptedObjects != nil { - if err := validateSseKmsEncryptedObjects(v.SseKmsEncryptedObjects); err != nil { - invalidParams.AddNested("SseKmsEncryptedObjects", err.(smithy.InvalidParamsError)) - } - } - if v.ReplicaModifications != nil { - if err := validateReplicaModifications(v.ReplicaModifications); err != nil { - invalidParams.AddNested("ReplicaModifications", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSSEKMS(v *types.SSEKMS) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SSEKMS"} - if v.KeyId == nil { - invalidParams.Add(smithy.NewErrParamRequired("KeyId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SseKmsEncryptedObjects"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateStorageClassAnalysis(v *types.StorageClassAnalysis) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysis"} - if v.DataExport != nil { - if err := validateStorageClassAnalysisDataExport(v.DataExport); err != nil { - invalidParams.AddNested("DataExport", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysisDataExport"} - if len(v.OutputSchemaVersion) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("OutputSchemaVersion")) - } - if v.Destination == nil { - invalidParams.Add(smithy.NewErrParamRequired("Destination")) - } else if v.Destination != nil { - if err := validateAnalyticsExportDestination(v.Destination); err != nil { - invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTag(v *types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tag"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Value == nil { - invalidParams.Add(smithy.NewErrParamRequired("Value")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagging(v *types.Tagging) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tagging"} - if v.TagSet == nil { - invalidParams.Add(smithy.NewErrParamRequired("TagSet")) - } else if v.TagSet != nil { - if err := validateTagSet(v.TagSet); err != nil { - invalidParams.AddNested("TagSet", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagSet(v []types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TagSet"} - for i := range v { - if err := validateTag(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTargetGrant(v *types.TargetGrant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TargetGrant"} - if v.Grantee != nil { - if err := validateGrantee(v.Grantee); err != nil { - invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTargetGrants(v []types.TargetGrant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TargetGrants"} - for i := range v { - if err := validateTargetGrant(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTiering(v *types.Tiering) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tiering"} - if v.Days == nil { - invalidParams.Add(smithy.NewErrParamRequired("Days")) - } - if len(v.AccessTier) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("AccessTier")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTieringList(v []types.Tiering) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TieringList"} - for i := range v { - if err := validateTiering(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTopicConfiguration(v *types.TopicConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TopicConfiguration"} - if v.TopicArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("TopicArn")) - } - if v.Events == nil { - invalidParams.Add(smithy.NewErrParamRequired("Events")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTopicConfigurationList(v []types.TopicConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TopicConfigurationList"} - for i := range v { - if err := validateTopicConfiguration(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateWebsiteConfiguration(v *types.WebsiteConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "WebsiteConfiguration"} - if v.ErrorDocument != nil { - if err := validateErrorDocument(v.ErrorDocument); err != nil { - invalidParams.AddNested("ErrorDocument", err.(smithy.InvalidParamsError)) - } - } - if v.IndexDocument != nil { - if err := validateIndexDocument(v.IndexDocument); err != nil { - invalidParams.AddNested("IndexDocument", err.(smithy.InvalidParamsError)) - } - } - if v.RedirectAllRequestsTo != nil { - if err := validateRedirectAllRequestsTo(v.RedirectAllRequestsTo); err != nil { - invalidParams.AddNested("RedirectAllRequestsTo", err.(smithy.InvalidParamsError)) - } - } - if v.RoutingRules != nil { - if err := validateRoutingRules(v.RoutingRules); err != nil { - invalidParams.AddNested("RoutingRules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAbortMultipartUploadInput(v *AbortMultipartUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AbortMultipartUploadInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCompleteMultipartUploadInput(v *CompleteMultipartUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CompleteMultipartUploadInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCopyObjectInput(v *CopyObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CopyObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.CopySource == nil { - invalidParams.Add(smithy.NewErrParamRequired("CopySource")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateBucketInput(v *CreateBucketInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateBucketInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.CreateBucketConfiguration != nil { - if err := validateCreateBucketConfiguration(v.CreateBucketConfiguration); err != nil { - invalidParams.AddNested("CreateBucketConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateBucketMetadataConfigurationInput(v *CreateBucketMetadataConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateBucketMetadataConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.MetadataConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("MetadataConfiguration")) - } else if v.MetadataConfiguration != nil { - if err := validateMetadataConfiguration(v.MetadataConfiguration); err != nil { - invalidParams.AddNested("MetadataConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateBucketMetadataTableConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.MetadataTableConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("MetadataTableConfiguration")) - } else if v.MetadataTableConfiguration != nil { - if err := validateMetadataTableConfiguration(v.MetadataTableConfiguration); err != nil { - invalidParams.AddNested("MetadataTableConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateMultipartUploadInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateSessionInput(v *CreateSessionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateSessionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketAnalyticsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketCorsInput(v *DeleteBucketCorsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketCorsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketEncryptionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketInput(v *DeleteBucketInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketIntelligentTieringConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInventoryConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketLifecycleInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketMetadataConfigurationInput(v *DeleteBucketMetadataConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetadataConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetadataTableConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetricsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketOwnershipControlsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketPolicyInput(v *DeleteBucketPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketPolicyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketReplicationInput(v *DeleteBucketReplicationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketReplicationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketTaggingInput(v *DeleteBucketTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketWebsiteInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteObjectInput(v *DeleteObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteObjectsInput(v *DeleteObjectsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Delete == nil { - invalidParams.Add(smithy.NewErrParamRequired("Delete")) - } else if v.Delete != nil { - if err := validateDelete(v.Delete); err != nil { - invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteObjectTaggingInput(v *DeleteObjectTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeletePublicAccessBlockInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketAccelerateConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketAclInput(v *GetBucketAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketAclInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketAnalyticsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketCorsInput(v *GetBucketCorsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketCorsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketEncryptionInput(v *GetBucketEncryptionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketEncryptionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketIntelligentTieringConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketInventoryConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketLifecycleConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketLocationInput(v *GetBucketLocationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketLocationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketLoggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketMetadataConfigurationInput(v *GetBucketMetadataConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetadataConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetadataTableConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetricsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketNotificationConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketOwnershipControlsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketPolicyInput(v *GetBucketPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyStatusInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketReplicationInput(v *GetBucketReplicationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketReplicationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketRequestPaymentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketTaggingInput(v *GetBucketTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketVersioningInput(v *GetBucketVersioningInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketVersioningInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketWebsiteInput(v *GetBucketWebsiteInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketWebsiteInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectAclInput(v *GetObjectAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectAclInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectAttributesInput(v *GetObjectAttributesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectAttributesInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.ObjectAttributes == nil { - invalidParams.Add(smithy.NewErrParamRequired("ObjectAttributes")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectInput(v *GetObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectLegalHoldInput(v *GetObjectLegalHoldInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectLegalHoldInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectLockConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectRetentionInput(v *GetObjectRetentionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectRetentionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectTaggingInput(v *GetObjectTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectTorrentInput(v *GetObjectTorrentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectTorrentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetPublicAccessBlockInput(v *GetPublicAccessBlockInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetPublicAccessBlockInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpHeadBucketInput(v *HeadBucketInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "HeadBucketInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpHeadObjectInput(v *HeadObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "HeadObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketAnalyticsConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketIntelligentTieringConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketInventoryConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketMetricsConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListMultipartUploadsInput(v *ListMultipartUploadsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListMultipartUploadsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListObjectsInput(v *ListObjectsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListObjectsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListObjectsV2Input(v *ListObjectsV2Input) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListObjectsV2Input"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListObjectVersionsInput(v *ListObjectVersionsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListObjectVersionsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListPartsInput(v *ListPartsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListPartsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketAccelerateConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.AccelerateConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccelerateConfiguration")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketAclInput(v *PutBucketAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"} - if v.AccessControlPolicy != nil { - if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) - } - } - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketAnalyticsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.AnalyticsConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("AnalyticsConfiguration")) - } else if v.AnalyticsConfiguration != nil { - if err := validateAnalyticsConfiguration(v.AnalyticsConfiguration); err != nil { - invalidParams.AddNested("AnalyticsConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketCorsInput(v *PutBucketCorsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketCorsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.CORSConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("CORSConfiguration")) - } else if v.CORSConfiguration != nil { - if err := validateCORSConfiguration(v.CORSConfiguration); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketEncryptionInput(v *PutBucketEncryptionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketEncryptionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.ServerSideEncryptionConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("ServerSideEncryptionConfiguration")) - } else if v.ServerSideEncryptionConfiguration != nil { - if err := validateServerSideEncryptionConfiguration(v.ServerSideEncryptionConfiguration); err != nil { - invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketIntelligentTieringConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.IntelligentTieringConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("IntelligentTieringConfiguration")) - } else if v.IntelligentTieringConfiguration != nil { - if err := validateIntelligentTieringConfiguration(v.IntelligentTieringConfiguration); err != nil { - invalidParams.AddNested("IntelligentTieringConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketInventoryConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.InventoryConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("InventoryConfiguration")) - } else if v.InventoryConfiguration != nil { - if err := validateInventoryConfiguration(v.InventoryConfiguration); err != nil { - invalidParams.AddNested("InventoryConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketLifecycleConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.LifecycleConfiguration != nil { - if err := validateBucketLifecycleConfiguration(v.LifecycleConfiguration); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketLoggingInput(v *PutBucketLoggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketLoggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.BucketLoggingStatus == nil { - invalidParams.Add(smithy.NewErrParamRequired("BucketLoggingStatus")) - } else if v.BucketLoggingStatus != nil { - if err := validateBucketLoggingStatus(v.BucketLoggingStatus); err != nil { - invalidParams.AddNested("BucketLoggingStatus", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketMetricsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.MetricsConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("MetricsConfiguration")) - } else if v.MetricsConfiguration != nil { - if err := validateMetricsConfiguration(v.MetricsConfiguration); err != nil { - invalidParams.AddNested("MetricsConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketNotificationConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.NotificationConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("NotificationConfiguration")) - } else if v.NotificationConfiguration != nil { - if err := validateNotificationConfiguration(v.NotificationConfiguration); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketOwnershipControlsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.OwnershipControls == nil { - invalidParams.Add(smithy.NewErrParamRequired("OwnershipControls")) - } else if v.OwnershipControls != nil { - if err := validateOwnershipControls(v.OwnershipControls); err != nil { - invalidParams.AddNested("OwnershipControls", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketPolicyInput(v *PutBucketPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketPolicyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Policy == nil { - invalidParams.Add(smithy.NewErrParamRequired("Policy")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketReplicationInput(v *PutBucketReplicationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketReplicationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.ReplicationConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration")) - } else if v.ReplicationConfiguration != nil { - if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketRequestPaymentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.RequestPaymentConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("RequestPaymentConfiguration")) - } else if v.RequestPaymentConfiguration != nil { - if err := validateRequestPaymentConfiguration(v.RequestPaymentConfiguration); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketTaggingInput(v *PutBucketTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Tagging == nil { - invalidParams.Add(smithy.NewErrParamRequired("Tagging")) - } else if v.Tagging != nil { - if err := validateTagging(v.Tagging); err != nil { - invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketVersioningInput(v *PutBucketVersioningInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketVersioningInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.VersioningConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("VersioningConfiguration")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketWebsiteInput(v *PutBucketWebsiteInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketWebsiteInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.WebsiteConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("WebsiteConfiguration")) - } else if v.WebsiteConfiguration != nil { - if err := validateWebsiteConfiguration(v.WebsiteConfiguration); err != nil { - invalidParams.AddNested("WebsiteConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectAclInput(v *PutObjectAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"} - if v.AccessControlPolicy != nil { - if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) - } - } - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectInput(v *PutObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectLegalHoldInput(v *PutObjectLegalHoldInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectLegalHoldInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectLockConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectRetentionInput(v *PutObjectRetentionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectRetentionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectTaggingInput(v *PutObjectTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Tagging == nil { - invalidParams.Add(smithy.NewErrParamRequired("Tagging")) - } else if v.Tagging != nil { - if err := validateTagging(v.Tagging); err != nil { - invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutPublicAccessBlockInput(v *PutPublicAccessBlockInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutPublicAccessBlockInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.PublicAccessBlockConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("PublicAccessBlockConfiguration")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpRenameObjectInput(v *RenameObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RenameObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.RenameSource == nil { - invalidParams.Add(smithy.NewErrParamRequired("RenameSource")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpRestoreObjectInput(v *RestoreObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RestoreObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.RestoreRequest != nil { - if err := validateRestoreRequest(v.RestoreRequest); err != nil { - invalidParams.AddNested("RestoreRequest", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpSelectObjectContentInput(v *SelectObjectContentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SelectObjectContentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Expression == nil { - invalidParams.Add(smithy.NewErrParamRequired("Expression")) - } - if len(v.ExpressionType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) - } - if v.InputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) - } - if v.OutputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUpdateBucketMetadataInventoryTableConfigurationInput(v *UpdateBucketMetadataInventoryTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UpdateBucketMetadataInventoryTableConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.InventoryTableConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("InventoryTableConfiguration")) - } else if v.InventoryTableConfiguration != nil { - if err := validateInventoryTableConfigurationUpdates(v.InventoryTableConfiguration); err != nil { - invalidParams.AddNested("InventoryTableConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUpdateBucketMetadataJournalTableConfigurationInput(v *UpdateBucketMetadataJournalTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UpdateBucketMetadataJournalTableConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.JournalTableConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("JournalTableConfiguration")) - } else if v.JournalTableConfiguration != nil { - if err := validateJournalTableConfigurationUpdates(v.JournalTableConfiguration); err != nil { - invalidParams.AddNested("JournalTableConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UploadPartCopyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.CopySource == nil { - invalidParams.Add(smithy.NewErrParamRequired("CopySource")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.PartNumber == nil { - invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUploadPartInput(v *UploadPartInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UploadPartInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.PartNumber == nil { - invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpWriteGetObjectResponseInput(v *WriteGetObjectResponseInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "WriteGetObjectResponseInput"} - if v.RequestRoute == nil { - invalidParams.Add(smithy.NewErrParamRequired("RequestRoute")) - } - if v.RequestToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("RequestToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md deleted file mode 100644 index dafddce704b8..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ /dev/null @@ -1,642 +0,0 @@ -# v1.28.2 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2025-08-20) - -* **Bug Fix**: Remove unused deserialization code. - -# v1.28.0 (2025-08-11) - -* **Feature**: Add support for configuring per-service Options via callback on global config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.0 (2025-08-04) - -* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.6 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.5 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.4 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.3 (2025-04-03) - -* No change notes available for this release. - -# v1.25.2 (2025-03-25) - -* No change notes available for this release. - -# v1.25.1 (2025-03-04.2) - -* **Bug Fix**: Add assurance test for operation order. - -# v1.25.0 (2025-02-27) - -* **Feature**: Track credential providers via User-Agent Feature ids -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.16 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.15 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.14 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.13 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.12 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.24.11 (2025-01-17) - -* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. - -# v1.24.10 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.9 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.8 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.7 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.6 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.5 (2024-11-07) - -* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses - -# v1.24.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.4 (2024-10-03) - -* No change notes available for this release. - -# v1.23.3 (2024-09-27) - -* No change notes available for this release. - -# v1.23.2 (2024-09-25) - -* No change notes available for this release. - -# v1.23.1 (2024-09-23) - -* No change notes available for this release. - -# v1.23.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.8 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.22.7 (2024-09-04) - -* No change notes available for this release. - -# v1.22.6 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.5 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.4 (2024-07-18) - -* No change notes available for this release. - -# v1.22.3 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.2 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.21.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.12 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.11 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.10 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.9 (2024-05-23) - -* No change notes available for this release. - -# v1.20.8 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.7 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.6 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.20.5 (2024-04-05) - -* No change notes available for this release. - -# v1.20.4 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.19.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.19.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2024-01-18) - -* No change notes available for this release. - -# v1.18.6 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.18.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.18.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.17.3 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-10-02) - -* **Feature**: Fix FIPS Endpoints in aws-us-gov. - -# v1.14.1 (2023-09-22) - -* No change notes available for this release. - -# v1.14.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.13.6 (2023-08-31) - -* No change notes available for this release. - -# v1.13.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-08-01) - -* No change notes available for this release. - -# v1.13.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2023-06-15) - -* No change notes available for this release. - -# v1.12.11 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2023-05-04) - -* No change notes available for this release. - -# v1.12.9 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2023-04-10) - -* No change notes available for this release. - -# v1.12.7 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.12.3 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.12.1 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.11.28 (2022-12-20) - -* No change notes available for this release. - -# v1.11.27 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.26 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.25 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.24 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.23 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.22 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.21 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.20 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.19 (2022-08-30) - -* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference. - -# v1.11.18 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.17 (2022-08-15) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) - -# v1.11.16 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.15 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.14 (2022-08-08) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.13 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.12 (2022-07-11) - -* No change notes available for this release. - -# v1.11.11 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.10 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.9 (2022-06-16) - -* No change notes available for this release. - -# v1.11.8 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.7 (2022-05-26) - -* No change notes available for this release. - -# v1.11.6 (2022-05-25) - -* No change notes available for this release. - -# v1.11.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: Updated API models -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-12-21) - -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. - -# v1.6.2 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Feature**: Updated service to latest API model. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go deleted file mode 100644 index 2c498e4689a9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ /dev/null @@ -1,1019 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "SSO" -const ServiceAPIVersion = "2019-06-10" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sso") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sso") -} - -// Client provides the API client to make operations call for AWS Single Sign-On. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AuthSchemePreference: cfg.AuthSchemePreference, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveInterceptors(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, func(o *Options) { - for _, opt := range cfg.ServiceOptions { - opt(ServiceID, o) - } - for _, opt := range optFns { - opt(o) - } - }) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) - } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func resolveInterceptors(cfg aws.Config, o *Options) { - o.Interceptors = cfg.Interceptors.Copy() -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - middleware.FinalizeOutput, middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") - }) - if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -type setCredentialSourceMiddleware struct { - ua *awsmiddleware.RequestUserAgent - options Options -} - -func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } - -func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) - if !ok { - return next.HandleBuild(ctx, in) - } - providerSources := asProviderSource.ProviderSources() - for _, source := range providerSources { - m.ua.AddCredentialsSource(source) - } - return next.HandleBuild(ctx, in) -} - -func addCredentialSource(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - mw := setCredentialSourceMiddleware{ua: ua, options: options} - return stack.Build.Insert(&mw, "UserAgent", middleware.Before) -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ - Interceptors: opts.Interceptors.BeforeRetryLoop, - }, "Retry", middleware.Before) -} - -func addInterceptAttempt(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ - BeforeAttempt: opts.Interceptors.BeforeAttempt, - AfterAttempt: opts.Interceptors.AfterAttempt, - }, "Retry", middleware.After) -} - -func addInterceptExecution(stack *middleware.Stack, opts Options) error { - return stack.Initialize.Add(&smithyhttp.InterceptExecution{ - BeforeExecution: opts.Interceptors.BeforeExecution, - AfterExecution: opts.Interceptors.AfterExecution, - }, middleware.Before) -} - -func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ - Interceptors: opts.Interceptors.BeforeSerialization, - }, "OperationSerializer", middleware.Before) -} - -func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ - Interceptors: opts.Interceptors.AfterSerialization, - }, "OperationSerializer", middleware.After) -} - -func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ - Interceptors: opts.Interceptors.BeforeSigning, - }, "Signing", middleware.Before) -} - -func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ - Interceptors: opts.Interceptors.AfterSigning, - }, "Signing", middleware.After) -} - -func addInterceptTransmit(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ - BeforeTransmit: opts.Interceptors.BeforeTransmit, - AfterTransmit: opts.Interceptors.AfterTransmit, - }, middleware.After) -} - -func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ - Interceptors: opts.Interceptors.BeforeDeserialization, - }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse) -} - -func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ - Interceptors: opts.Interceptors.AfterDeserialization, - }, "OperationDeserializer", middleware.Before) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go deleted file mode 100644 index df5dc1674f34..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ /dev/null @@ -1,201 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the STS short-term credentials for a given role name that is assigned -// to the user. -func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { - if params == nil { - params = &GetRoleCredentialsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRoleCredentialsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRoleCredentialsInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - // The identifier for the AWS account that is assigned to the user. - // - // This member is required. - AccountId *string - - // The friendly name of the role that is assigned to the user. - // - // This member is required. - RoleName *string - - noSmithyDocumentSerde -} - -type GetRoleCredentialsOutput struct { - - // The credentials for the role that is assigned to the user. - RoleCredentials *types.RoleCredentials - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRoleCredentials", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go deleted file mode 100644 index 2a3b2ad90212..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ /dev/null @@ -1,299 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Lists all roles that are assigned to the user for a given AWS account. -func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { - if params == nil { - params = &ListAccountRolesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListAccountRolesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListAccountRolesInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - // The identifier for the AWS account that is assigned to the user. - // - // This member is required. - AccountId *string - - // The number of items that clients can request per page. - MaxResults *int32 - - // The page token from the previous response output when you request subsequent - // pages. - NextToken *string - - noSmithyDocumentSerde -} - -type ListAccountRolesOutput struct { - - // The page token client that is used to retrieve the list of accounts. - NextToken *string - - // A paginated response with the list of roles and the next token if more results - // are available. - RoleList []types.RoleInfo - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles -type ListAccountRolesPaginatorOptions struct { - // The number of items that clients can request per page. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListAccountRolesPaginator is a paginator for ListAccountRoles -type ListAccountRolesPaginator struct { - options ListAccountRolesPaginatorOptions - client ListAccountRolesAPIClient - params *ListAccountRolesInput - nextToken *string - firstPage bool -} - -// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator -func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { - if params == nil { - params = &ListAccountRolesInput{} - } - - options := ListAccountRolesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListAccountRolesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListAccountRolesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListAccountRoles page. -func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListAccountRolesAPIClient is a client that implements the ListAccountRoles -// operation. -type ListAccountRolesAPIClient interface { - ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) -} - -var _ ListAccountRolesAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListAccountRoles", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go deleted file mode 100644 index f6114a7c105e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity -// Center User Guide. This operation returns a paginated response. -// -// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers -func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { - if params == nil { - params = &ListAccountsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListAccountsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListAccountsInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - // This is the number of items clients can request per page. - MaxResults *int32 - - // (Optional) When requesting subsequent pages, this is the page token from the - // previous response output. - NextToken *string - - noSmithyDocumentSerde -} - -type ListAccountsOutput struct { - - // A paginated response with the list of account information and the next token if - // more results are available. - AccountList []types.AccountInfo - - // The page token client that is used to retrieve the list of accounts. - NextToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpListAccountsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListAccountsPaginatorOptions is the paginator options for ListAccounts -type ListAccountsPaginatorOptions struct { - // This is the number of items clients can request per page. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListAccountsPaginator is a paginator for ListAccounts -type ListAccountsPaginator struct { - options ListAccountsPaginatorOptions - client ListAccountsAPIClient - params *ListAccountsInput - nextToken *string - firstPage bool -} - -// NewListAccountsPaginator returns a new ListAccountsPaginator -func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator { - if params == nil { - params = &ListAccountsInput{} - } - - options := ListAccountsPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListAccountsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListAccountsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListAccounts page. -func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListAccountsAPIClient is a client that implements the ListAccounts operation. -type ListAccountsAPIClient interface { - ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) -} - -var _ ListAccountsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListAccounts", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go deleted file mode 100644 index 2c7f181c344e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Removes the locally stored SSO tokens from the client-side cache and sends an -// API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. -// -// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM -// Identity Center sign in session is used to obtain an IAM session, as specified -// in the corresponding IAM Identity Center permission set. More specifically, IAM -// Identity Center assumes an IAM role in the target account on behalf of the user, -// and the corresponding temporary AWS credentials are returned to the client. -// -// After user logout, any existing IAM role sessions that were created by using -// IAM Identity Center permission sets continue based on the duration configured in -// the permission set. For more information, see [User authentications]in the IAM Identity Center User -// Guide. -// -// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html -func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { - if params == nil { - params = &LogoutInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*LogoutOutput) - out.ResultMetadata = metadata - return out, nil -} - -type LogoutInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - noSmithyDocumentSerde -} - -type LogoutOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpLogoutValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "Logout", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go deleted file mode 100644 index 708e53c5ad59..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ /dev/null @@ -1,363 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "slices" - "strings" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. -type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "Logout": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "awsssoportal") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - sorted := sortAuthOptions(options, m.options.AuthSchemePreference) - for _, option := range sorted { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option { - byPriority := make([]*smithyauth.Option, 0, len(options)) - for _, prefName := range preferred { - for _, option := range options { - optName := option.SchemeID - if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { - optName = parts[1] - } - if prefName == optName { - byPriority = append(byPriority, option) - } - } - } - for _, option := range options { - if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool { - return o.SchemeID == option.SchemeID - }) { - byPriority = append(byPriority, option) - } - } - return byPriority -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go deleted file mode 100644 index a889f3c7a7f1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ /dev/null @@ -1,1172 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "io/ioutil" - "strings" -) - -type awsRestjson1_deserializeOpGetRoleCredentials struct { -} - -func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) - } - output := &GetRoleCredentialsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRoleCredentialsOutput - if *v == nil { - sv = &GetRoleCredentialsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "roleCredentials": - if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpListAccountRoles struct { -} - -func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) - } - output := &ListAccountRolesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListAccountRolesOutput - if *v == nil { - sv = &ListAccountRolesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "roleList": - if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpListAccounts struct { -} - -func (*awsRestjson1_deserializeOpListAccounts) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) - } - output := &ListAccountsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListAccountsOutput - if *v == nil { - sv = &ListAccountsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountList": - if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpLogout struct { -} - -func (*awsRestjson1_deserializeOpLogout) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) - } - output := &LogoutOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ResourceNotFoundException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.TooManyRequestsException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnauthorizedException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AccountInfo - if *v == nil { - sv = &types.AccountInfo{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) - } - sv.AccountId = ptr.String(jtv) - } - - case "accountName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) - } - sv.AccountName = ptr.String(jtv) - } - - case "emailAddress": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) - } - sv.EmailAddress = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.AccountInfo - if *v == nil { - cv = []types.AccountInfo{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.AccountInfo - destAddr := &col - if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestException - if *v == nil { - sv = &types.InvalidRequestException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ResourceNotFoundException - if *v == nil { - sv = &types.ResourceNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RoleCredentials - if *v == nil { - sv = &types.RoleCredentials{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessKeyId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) - } - sv.AccessKeyId = ptr.String(jtv) - } - - case "expiration": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Expiration = i64 - } - - case "secretAccessKey": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) - } - sv.SecretAccessKey = ptr.String(jtv) - } - - case "sessionToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) - } - sv.SessionToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RoleInfo - if *v == nil { - sv = &types.RoleInfo{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) - } - sv.AccountId = ptr.String(jtv) - } - - case "roleName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) - } - sv.RoleName = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RoleInfo - if *v == nil { - cv = []types.RoleInfo{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RoleInfo - destAddr := &col - if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.TooManyRequestsException - if *v == nil { - sv = &types.TooManyRequestsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnauthorizedException - if *v == nil { - sv = &types.UnauthorizedException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go deleted file mode 100644 index 7f6e429fda8a..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package sso provides the API client, operations, and parameter types for AWS -// Single Sign-On. -// -// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web -// service that makes it easy for you to assign user access to IAM Identity Center -// resources such as the AWS access portal. Users can get AWS account applications -// and roles assigned to them and get federated into the application. -// -// Although AWS Single Sign-On was renamed, the sso and identitystore API -// namespaces will continue to retain their original name for backward -// compatibility purposes. For more information, see [IAM Identity Center rename]. -// -// This reference guide describes the IAM Identity Center Portal operations that -// you can call programatically and includes detailed information on data types and -// errors. -// -// AWS provides SDKs that consist of libraries and sample code for various -// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. -// The SDKs provide a convenient way to create programmatic access to IAM Identity -// Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see [Tools for Amazon Web Services]. -// -// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ -// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed -package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go deleted file mode 100644 index 53c6bc756124..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ /dev/null @@ -1,556 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. -func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "awsssoportal" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. -// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. -func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { - if "aws-us-gov" == _PartitionResult.Name { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json deleted file mode 100644 index 1a88fe4df8e4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_GetRoleCredentials.go", - "api_op_ListAccountRoles.go", - "api_op_ListAccounts.go", - "api_op_Logout.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "sra_operation_order_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.22", - "module": "github.com/aws/aws-sdk-go-v2/service/sso", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go deleted file mode 100644 index def5652fd9f3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package sso - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go deleted file mode 100644 index 04416606be06..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ /dev/null @@ -1,597 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver SSO endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsEusc *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.af-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "af-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-4.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-4", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-5.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-5", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ca-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-central-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-north-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-3", - }, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.il-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "il-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.me-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.me-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.sa-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "sa-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - { - ID: "aws-eusc", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsEusc, - IsRegionalized: true, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go deleted file mode 100644 index 277550af4701..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ /dev/null @@ -1,239 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. - TracerProvider tracing.TracerProvider - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // Client registry of operation interceptors. - Interceptors smithyhttp.InterceptorRegistry - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme - - // Priority list of preferred auth scheme names (e.g. sigv4a). - AuthSchemePreference []string -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - to.Interceptors = o.Interceptors.Copy() - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go deleted file mode 100644 index a7a5b57de08f..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go +++ /dev/null @@ -1,309 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type awsRestjson1_serializeOpGetRoleCredentials struct { -} - -func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetRoleCredentialsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/federation/credentials") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.AccountId != nil { - encoder.SetQuery("account_id").String(*v.AccountId) - } - - if v.RoleName != nil { - encoder.SetQuery("role_name").String(*v.RoleName) - } - - return nil -} - -type awsRestjson1_serializeOpListAccountRoles struct { -} - -func (*awsRestjson1_serializeOpListAccountRoles) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListAccountRolesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/assignment/roles") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.AccountId != nil { - encoder.SetQuery("account_id").String(*v.AccountId) - } - - if v.MaxResults != nil { - encoder.SetQuery("max_result").Integer(*v.MaxResults) - } - - if v.NextToken != nil { - encoder.SetQuery("next_token").String(*v.NextToken) - } - - return nil -} - -type awsRestjson1_serializeOpListAccounts struct { -} - -func (*awsRestjson1_serializeOpListAccounts) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListAccountsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.MaxResults != nil { - encoder.SetQuery("max_result").Integer(*v.MaxResults) - } - - if v.NextToken != nil { - encoder.SetQuery("next_token").String(*v.NextToken) - } - - return nil -} - -type awsRestjson1_serializeOpLogout struct { -} - -func (*awsRestjson1_serializeOpLogout) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*LogoutInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/logout") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go deleted file mode 100644 index e97a126e8bb5..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// Indicates that a problem occurred with the input to the request. For example, a -// required parameter might be missing or out of range. -type InvalidRequestException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified resource doesn't exist. -type ResourceNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ResourceNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ResourceNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ResourceNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the request is being made too frequently and is more than what -// the server can handle. -type TooManyRequestsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyRequestsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyRequestsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyRequestsException" - } - return *e.ErrorCodeOverride -} -func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -type UnauthorizedException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnauthorizedException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnauthorizedException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnauthorizedException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnauthorizedException" - } - return *e.ErrorCodeOverride -} -func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go deleted file mode 100644 index 07ac468e3184..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" -) - -// Provides information about your AWS account. -type AccountInfo struct { - - // The identifier of the AWS account that is assigned to the user. - AccountId *string - - // The display name of the AWS account that is assigned to the user. - AccountName *string - - // The email address of the AWS account that is assigned to the user. - EmailAddress *string - - noSmithyDocumentSerde -} - -// Provides information about the role credentials that are assigned to the user. -type RoleCredentials struct { - - // The identifier used for the temporary security credentials. For more - // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide. - // - // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - AccessKeyId *string - - // The date on which temporary security credentials expire. - Expiration int64 - - // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS - // IAM User Guide. - // - // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - SecretAccessKey *string - - // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS - // IAM User Guide. - // - // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - SessionToken *string - - noSmithyDocumentSerde -} - -// Provides information about the role that is assigned to the user. -type RoleInfo struct { - - // The identifier of the AWS account assigned to the user. - AccountId *string - - // The friendly name of the role that is assigned to the user. - RoleName *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go deleted file mode 100644 index f6bf461f74ba..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpGetRoleCredentials struct { -} - -func (*validateOpGetRoleCredentials) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetRoleCredentialsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetRoleCredentialsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListAccountRoles struct { -} - -func (*validateOpListAccountRoles) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListAccountRolesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListAccountRolesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListAccounts struct { -} - -func (*validateOpListAccounts) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListAccountsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListAccountsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpLogout struct { -} - -func (*validateOpLogout) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*LogoutInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpLogoutInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) -} - -func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After) -} - -func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) -} - -func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpLogout{}, middleware.After) -} - -func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} - if v.RoleName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleName")) - } - if v.AccountId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccountId")) - } - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if v.AccountId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccountId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListAccountsInput(v *ListAccountsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpLogoutInput(v *LogoutInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md deleted file mode 100644 index 35085b7fa8f9..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ /dev/null @@ -1,641 +0,0 @@ -# v1.34.0 (2025-08-26) - -* **Feature**: Remove incorrect endpoint tests - -# v1.33.2 (2025-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.1 (2025-08-20) - -* **Bug Fix**: Remove unused deserialization code. - -# v1.33.0 (2025-08-11) - -* **Feature**: Add support for configuring per-service Options via callback on global config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.0 (2025-08-04) - -* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.4 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.3 (2025-06-17) - -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.2 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.1 (2025-04-03) - -* No change notes available for this release. - -# v1.30.0 (2025-03-27) - -* **Feature**: This release adds AwsAdditionalDetails in the CreateTokenWithIAM API response. - -# v1.29.2 (2025-03-24) - -* No change notes available for this release. - -# v1.29.1 (2025-03-04.2) - -* **Bug Fix**: Add assurance test for operation order. - -# v1.29.0 (2025-02-27) - -* **Feature**: Track credential providers via User-Agent Feature ids -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.15 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.14 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.13 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.12 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.11 (2025-01-24) - -* **Documentation**: Fixed typos in the descriptions. -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.28.10 (2025-01-17) - -* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. - -# v1.28.9 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.4 (2024-10-03) - -* No change notes available for this release. - -# v1.27.3 (2024-09-27) - -* No change notes available for this release. - -# v1.27.2 (2024-09-25) - -* No change notes available for this release. - -# v1.27.1 (2024-09-23) - -* No change notes available for this release. - -# v1.27.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.8 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.26.7 (2024-09-04) - -* No change notes available for this release. - -# v1.26.6 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.5 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.4 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.2 (2024-07-03) - -* No change notes available for this release. - -# v1.26.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.25.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.6 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.5 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.4 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.3 (2024-05-23) - -* No change notes available for this release. - -# v1.24.2 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.1 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2024-05-10) - -* **Feature**: Updated request parameters for PKCE support. - -# v1.23.5 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.23.4 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.22.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.22.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.7 (2024-01-16) - -* No change notes available for this release. - -# v1.21.6 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.21.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.21.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.3 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.20.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-11-17) - -* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`. - -# v1.19.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2023-09-22) - -* No change notes available for this release. - -# v1.17.0 (2023-09-20) - -* **Feature**: Update FIPS endpoints in aws-us-gov. - -# v1.16.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.15.6 (2023-09-05) - -* No change notes available for this release. - -# v1.15.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-08-01) - -* No change notes available for this release. - -# v1.15.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.14 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.13 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.12 (2023-06-15) - -* No change notes available for this release. - -# v1.14.11 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.10 (2023-05-04) - -* No change notes available for this release. - -# v1.14.9 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.8 (2023-04-10) - -* No change notes available for this release. - -# v1.14.7 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.6 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.14.3 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.14.1 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.13.11 (2022-12-19) - -* No change notes available for this release. - -# v1.13.10 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-09-30) - -* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference. - -# v1.13.5 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-08-25) - -* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. - -# v1.12.14 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-08) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-07-11) - -* No change notes available for this release. - -# v1.12.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-05-27) - -* No change notes available for this release. - -# v1.12.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-07) - -* **Feature**: API client updated -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-11) - -* **Feature**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-09-17) - -* **Feature**: Updated API client and endpoints to latest revision. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-27) - -* **Feature**: Updated API model to latest revision. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go deleted file mode 100644 index 12ad2f5d9d57..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ /dev/null @@ -1,1019 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "SSO OIDC" -const ServiceAPIVersion = "2019-06-10" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ssooidc") -} - -// Client provides the API client to make operations call for AWS SSO OIDC. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AuthSchemePreference: cfg.AuthSchemePreference, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveInterceptors(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, func(o *Options) { - for _, opt := range cfg.ServiceOptions { - opt(ServiceID, o) - } - for _, opt := range optFns { - opt(o) - } - }) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) - } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func resolveInterceptors(cfg aws.Config, o *Options) { - o.Interceptors = cfg.Interceptors.Copy() -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - middleware.FinalizeOutput, middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") - }) - if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -type setCredentialSourceMiddleware struct { - ua *awsmiddleware.RequestUserAgent - options Options -} - -func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } - -func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) - if !ok { - return next.HandleBuild(ctx, in) - } - providerSources := asProviderSource.ProviderSources() - for _, source := range providerSources { - m.ua.AddCredentialsSource(source) - } - return next.HandleBuild(ctx, in) -} - -func addCredentialSource(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - mw := setCredentialSourceMiddleware{ua: ua, options: options} - return stack.Build.Insert(&mw, "UserAgent", middleware.Before) -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ - Interceptors: opts.Interceptors.BeforeRetryLoop, - }, "Retry", middleware.Before) -} - -func addInterceptAttempt(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ - BeforeAttempt: opts.Interceptors.BeforeAttempt, - AfterAttempt: opts.Interceptors.AfterAttempt, - }, "Retry", middleware.After) -} - -func addInterceptExecution(stack *middleware.Stack, opts Options) error { - return stack.Initialize.Add(&smithyhttp.InterceptExecution{ - BeforeExecution: opts.Interceptors.BeforeExecution, - AfterExecution: opts.Interceptors.AfterExecution, - }, middleware.Before) -} - -func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ - Interceptors: opts.Interceptors.BeforeSerialization, - }, "OperationSerializer", middleware.Before) -} - -func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ - Interceptors: opts.Interceptors.AfterSerialization, - }, "OperationSerializer", middleware.After) -} - -func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ - Interceptors: opts.Interceptors.BeforeSigning, - }, "Signing", middleware.Before) -} - -func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ - Interceptors: opts.Interceptors.AfterSigning, - }, "Signing", middleware.After) -} - -func addInterceptTransmit(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ - BeforeTransmit: opts.Interceptors.BeforeTransmit, - AfterTransmit: opts.Interceptors.AfterTransmit, - }, middleware.After) -} - -func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ - Interceptors: opts.Interceptors.BeforeDeserialization, - }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse) -} - -func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ - Interceptors: opts.Interceptors.AfterDeserialization, - }, "OperationDeserializer", middleware.Before) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go deleted file mode 100644 index b3875eeabeb3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ /dev/null @@ -1,272 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates and returns access and refresh tokens for clients that are -// authenticated using client secrets. The access token can be used to fetch -// short-lived credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. -func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { - if params == nil { - params = &CreateTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateTokenInput struct { - - // The unique identifier string for the client or application. This value comes - // from the result of the RegisterClientAPI. - // - // This member is required. - ClientId *string - - // A secret string generated for the client. This value should come from the - // persisted result of the RegisterClientAPI. - // - // This member is required. - ClientSecret *string - - // Supports the following OAuth grant types: Authorization Code, Device Code, and - // Refresh Token. Specify one of the following values, depending on the grant type - // that you want: - // - // * Authorization Code - authorization_code - // - // * Device Code - urn:ietf:params:oauth:grant-type:device_code - // - // * Refresh Token - refresh_token - // - // This member is required. - GrantType *string - - // Used only when calling this API for the Authorization Code grant type. The - // short-lived code is used to identify this authorization request. - Code *string - - // Used only when calling this API for the Authorization Code grant type. This - // value is generated by the client and presented to validate the original code - // challenge value the client passed at authorization time. - CodeVerifier *string - - // Used only when calling this API for the Device Code grant type. This - // short-lived code is used to identify this authorization request. This comes from - // the result of the StartDeviceAuthorizationAPI. - DeviceCode *string - - // Used only when calling this API for the Authorization Code grant type. This - // value specifies the location of the client or application that has registered to - // receive the authorization code. - RedirectUri *string - - // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-lived tokens, such as the access token, that might expire. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // The list of scopes for which authorization is requested. The access token that - // is issued is limited to the scopes that are granted. If this value is not - // specified, IAM Identity Center authorizes all scopes that are configured for the - // client during the call to RegisterClient. - Scope []string - - noSmithyDocumentSerde -} - -type CreateTokenOutput struct { - - // A bearer token to access Amazon Web Services accounts and applications assigned - // to a user. - AccessToken *string - - // Indicates the time in seconds when an access token will expire. - ExpiresIn int32 - - // The idToken is not implemented or supported. For more information about the - // features and limitations of the current IAM Identity Center OIDC implementation, - // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference]. - // - // A JSON Web Token (JWT) that identifies who is associated with the issued access - // token. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - IdToken *string - - // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // Used to notify the client that the returned token is an access token. The - // supported token type is Bearer . - TokenType *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCreateTokenValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go deleted file mode 100644 index 78b37b5eafde..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ /dev/null @@ -1,310 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates and returns access and refresh tokens for clients and applications that -// are authenticated using IAM entities. The access token can be used to fetch -// short-lived credentials for the assigned Amazon Web Services accounts or to -// access application APIs using bearer authentication. -func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { - if params == nil { - params = &CreateTokenWithIAMInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateTokenWithIAMOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateTokenWithIAMInput struct { - - // The unique identifier string for the client or application. This value is an - // application ARN that has OAuth grants configured. - // - // This member is required. - ClientId *string - - // Supports the following OAuth grant types: Authorization Code, Refresh Token, - // JWT Bearer, and Token Exchange. Specify one of the following values, depending - // on the grant type that you want: - // - // * Authorization Code - authorization_code - // - // * Refresh Token - refresh_token - // - // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer - // - // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange - // - // This member is required. - GrantType *string - - // Used only when calling this API for the JWT Bearer grant type. This value - // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To - // authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the - // application. - Assertion *string - - // Used only when calling this API for the Authorization Code grant type. This - // short-lived code is used to identify this authorization request. The code is - // obtained through a redirect from IAM Identity Center to a redirect URI persisted - // in the Authorization Code GrantOptions for the application. - Code *string - - // Used only when calling this API for the Authorization Code grant type. This - // value is generated by the client and presented to validate the original code - // challenge value the client passed at authorization time. - CodeVerifier *string - - // Used only when calling this API for the Authorization Code grant type. This - // value specifies the location of the client or application that has registered to - // receive the authorization code. - RedirectUri *string - - // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-lived tokens, such as the access token, that might expire. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the type of token that the requester can receive. The following values - // are supported: - // - // * Access Token - urn:ietf:params:oauth:token-type:access_token - // - // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token - RequestedTokenType *string - - // The list of scopes for which authorization is requested. The access token that - // is issued is limited to the scopes that are granted. If the value is not - // specified, IAM Identity Center authorizes all scopes configured for the - // application, including the following default scopes: openid , aws , - // sts:identity_context . - Scope []string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the subject of the exchange. The value of the subject token must be an - // access token issued by IAM Identity Center to a different client or application. - // The access token must have authorized scopes that indicate the requested - // application as a target audience. - SubjectToken *string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the type of token that is passed as the subject of the exchange. The - // following value is supported: - // - // * Access Token - urn:ietf:params:oauth:token-type:access_token - SubjectTokenType *string - - noSmithyDocumentSerde -} - -type CreateTokenWithIAMOutput struct { - - // A bearer token to access Amazon Web Services accounts and applications assigned - // to a user. - AccessToken *string - - // A structure containing information from the idToken . Only the identityContext - // is in it, which is a value extracted from the idToken . This provides direct - // access to identity information without requiring JWT parsing. - AwsAdditionalDetails *types.AwsAdditionalDetails - - // Indicates the time in seconds when an access token will expire. - ExpiresIn int32 - - // A JSON Web Token (JWT) that identifies the user associated with the issued - // access token. - IdToken *string - - // Indicates the type of tokens that are issued by IAM Identity Center. The - // following values are supported: - // - // * Access Token - urn:ietf:params:oauth:token-type:access_token - // - // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token - IssuedTokenType *string - - // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // The list of scopes for which authorization is granted. The access token that is - // issued is limited to the scopes that are granted. - Scope []string - - // Used to notify the requester that the returned token is an access token. The - // supported token type is Bearer . - TokenType *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateTokenWithIAM", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go deleted file mode 100644 index 8d50092fb15b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ /dev/null @@ -1,242 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Registers a public client with IAM Identity Center. This allows clients to -// perform authorization using the authorization code grant with Proof Key for Code -// Exchange (PKCE) or the device code grant. -func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { - if params == nil { - params = &RegisterClientInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*RegisterClientOutput) - out.ResultMetadata = metadata - return out, nil -} - -type RegisterClientInput struct { - - // The friendly name of the client. - // - // This member is required. - ClientName *string - - // The type of client. The service supports only public as a client type. Anything - // other than public will be rejected by the service. - // - // This member is required. - ClientType *string - - // This IAM Identity Center application ARN is used to define - // administrator-managed configuration for public client access to resources. At - // authorization, the scopes, grants, and redirect URI available to this client - // will be restricted by this application resource. - EntitledApplicationArn *string - - // The list of OAuth 2.0 grant types that are defined by the client. This list is - // used to restrict the token granting flows available to the client. Supports the - // following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh - // Token. - // - // * Authorization Code - authorization_code - // - // * Device Code - urn:ietf:params:oauth:grant-type:device_code - // - // * Refresh Token - refresh_token - GrantTypes []string - - // The IAM Identity Center Issuer URL associated with an instance of IAM Identity - // Center. This value is needed for user access to resources through the client. - IssuerUrl *string - - // The list of redirect URI that are defined by the client. At completion of - // authorization, this list is used to restrict what locations the user agent can - // be redirected back to. - RedirectUris []string - - // The list of scopes that are defined by the client. Upon authorization, this - // list is used to restrict permissions when granting an access token. - Scopes []string - - noSmithyDocumentSerde -} - -type RegisterClientOutput struct { - - // An endpoint that the client can use to request authorization. - AuthorizationEndpoint *string - - // The unique identifier string for each client. This client uses this identifier - // to get authenticated by the service in subsequent calls. - ClientId *string - - // Indicates the time at which the clientId and clientSecret were issued. - ClientIdIssuedAt int64 - - // A secret string generated for the client. The client will use this string to - // get authenticated by the service in subsequent calls. - ClientSecret *string - - // Indicates the time at which the clientId and clientSecret will become invalid. - ClientSecretExpiresAt int64 - - // An endpoint that the client can use to create tokens. - TokenEndpoint *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpRegisterClientValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "RegisterClient", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go deleted file mode 100644 index 7242ac82b68b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ /dev/null @@ -1,224 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Initiates device authorization by requesting a pair of verification codes from -// the authorization service. -func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { - if params == nil { - params = &StartDeviceAuthorizationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*StartDeviceAuthorizationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type StartDeviceAuthorizationInput struct { - - // The unique identifier string for the client that is registered with IAM - // Identity Center. This value should come from the persisted result of the RegisterClientAPI - // operation. - // - // This member is required. - ClientId *string - - // A secret string that is generated for the client. This value should come from - // the persisted result of the RegisterClientAPI operation. - // - // This member is required. - ClientSecret *string - - // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal] - // in the IAM Identity Center User Guide. - // - // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html - // - // This member is required. - StartUrl *string - - noSmithyDocumentSerde -} - -type StartDeviceAuthorizationOutput struct { - - // The short-lived code that is used by the device when polling for a session - // token. - DeviceCode *string - - // Indicates the number of seconds in which the verification code will become - // invalid. - ExpiresIn int32 - - // Indicates the number of seconds the client must wait between attempts when - // polling for a session. - Interval int32 - - // A one-time user verification code. This is needed to authorize an in-use device. - UserCode *string - - // The URI of the verification page that takes the userCode to authorize the - // device. - VerificationUri *string - - // An alternate URL that the client can use to automatically launch a browser. - // This process skips the manual step in which the user visits the verification - // page and enters their code. - VerificationUriComplete *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "StartDeviceAuthorization", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go deleted file mode 100644 index 89b01c629dd1..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ /dev/null @@ -1,357 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "slices" - "strings" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. -type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sso-oauth") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - sorted := sortAuthOptions(options, m.options.AuthSchemePreference) - for _, option := range sorted { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option { - byPriority := make([]*smithyauth.Option, 0, len(options)) - for _, prefName := range preferred { - for _, option := range options { - optName := option.SchemeID - if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { - optName = parts[1] - } - if prefName == optName { - byPriority = append(byPriority, option) - } - } - } - for _, option := range options { - if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool { - return o.SchemeID == option.SchemeID - }) { - byPriority = append(byPriority, option) - } - } - return byPriority -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go deleted file mode 100644 index 17712c6dc7ca..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ /dev/null @@ -1,2223 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strings" -) - -type awsRestjson1_deserializeOpCreateToken struct { -} - -func (*awsRestjson1_deserializeOpCreateToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) - } - output := &CreateTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - - case strings.EqualFold("AuthorizationPendingException", errorCode): - return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) - - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidGrantException", errorCode): - return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateTokenOutput - if *v == nil { - sv = &CreateTokenOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) - } - sv.AccessToken = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "idToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) - } - sv.IdToken = ptr.String(jtv) - } - - case "refreshToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) - } - sv.RefreshToken = ptr.String(jtv) - } - - case "tokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) - } - sv.TokenType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpCreateTokenWithIAM struct { -} - -func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata) - } - output := &CreateTokenWithIAMOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - - case strings.EqualFold("AuthorizationPendingException", errorCode): - return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) - - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidGrantException", errorCode): - return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidRequestRegionException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateTokenWithIAMOutput - if *v == nil { - sv = &CreateTokenWithIAMOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) - } - sv.AccessToken = ptr.String(jtv) - } - - case "awsAdditionalDetails": - if err := awsRestjson1_deserializeDocumentAwsAdditionalDetails(&sv.AwsAdditionalDetails, value); err != nil { - return err - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "idToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) - } - sv.IdToken = ptr.String(jtv) - } - - case "issuedTokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value) - } - sv.IssuedTokenType = ptr.String(jtv) - } - - case "refreshToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) - } - sv.RefreshToken = ptr.String(jtv) - } - - case "scope": - if err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil { - return err - } - - case "tokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) - } - sv.TokenType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpRegisterClient struct { -} - -func (*awsRestjson1_deserializeOpRegisterClient) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) - } - output := &RegisterClientOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientMetadataException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) - - case strings.EqualFold("InvalidRedirectUriException", errorCode): - return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *RegisterClientOutput - if *v == nil { - sv = &RegisterClientOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "authorizationEndpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.AuthorizationEndpoint = ptr.String(jtv) - } - - case "clientId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) - } - sv.ClientId = ptr.String(jtv) - } - - case "clientIdIssuedAt": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ClientIdIssuedAt = i64 - } - - case "clientSecret": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) - } - sv.ClientSecret = ptr.String(jtv) - } - - case "clientSecretExpiresAt": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ClientSecretExpiresAt = i64 - } - - case "tokenEndpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.TokenEndpoint = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpStartDeviceAuthorization struct { -} - -func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) - } - output := &StartDeviceAuthorizationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *StartDeviceAuthorizationOutput - if *v == nil { - sv = &StartDeviceAuthorizationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "deviceCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) - } - sv.DeviceCode = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "interval": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Interval = int32(i64) - } - - case "userCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) - } - sv.UserCode = ptr.String(jtv) - } - - case "verificationUri": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.VerificationUri = ptr.String(jtv) - } - - case "verificationUriComplete": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.VerificationUriComplete = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.AccessDeniedException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.AuthorizationPendingException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ExpiredTokenException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InternalServerException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidClientException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidClientMetadataException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidGrantException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRedirectUriException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestRegionException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidScopeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.SlowDownException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnauthorizedClientException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnsupportedGrantTypeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AccessDeniedException - if *v == nil { - sv = &types.AccessDeniedException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AuthorizationPendingException - if *v == nil { - sv = &types.AuthorizationPendingException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAwsAdditionalDetails(v **types.AwsAdditionalDetails, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AwsAdditionalDetails - if *v == nil { - sv = &types.AwsAdditionalDetails{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "identityContext": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected IdentityContext to be of type string, got %T instead", value) - } - sv.IdentityContext = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ExpiredTokenException - if *v == nil { - sv = &types.ExpiredTokenException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InternalServerException - if *v == nil { - sv = &types.InternalServerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidClientException - if *v == nil { - sv = &types.InvalidClientException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidClientMetadataException - if *v == nil { - sv = &types.InvalidClientMetadataException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidGrantException - if *v == nil { - sv = &types.InvalidGrantException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRedirectUriException - if *v == nil { - sv = &types.InvalidRedirectUriException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestException - if *v == nil { - sv = &types.InvalidRequestException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestRegionException - if *v == nil { - sv = &types.InvalidRequestRegionException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "endpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Location to be of type string, got %T instead", value) - } - sv.Endpoint = ptr.String(jtv) - } - - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - case "region": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Region to be of type string, got %T instead", value) - } - sv.Region = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidScopeException - if *v == nil { - sv = &types.InvalidScopeException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Scope to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.SlowDownException - if *v == nil { - sv = &types.SlowDownException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnauthorizedClientException - if *v == nil { - sv = &types.UnauthorizedClientException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnsupportedGrantTypeException - if *v == nil { - sv = &types.UnsupportedGrantTypeException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go deleted file mode 100644 index f3510b18c546..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package ssooidc provides the API client, operations, and parameter types for -// AWS SSO OIDC. -// -// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a -// client (such as CLI or a native application) to register with IAM Identity -// Center. The service also enables the client to fetch the user’s access token -// upon successful authentication and authorization with IAM Identity Center. -// -// # API namespaces -// -// IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity -// Center OpenID Connect uses the sso-oidc namespace. -// -// # Considerations for using this guide -// -// Before you begin using this guide, we recommend that you first review the -// following important information about how the IAM Identity Center OIDC service -// works. -// -// - The IAM Identity Center OIDC service currently implements only the portions -// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to -// enable single sign-on authentication with the CLI. -// -// - With older versions of the CLI, the service only emits OIDC access tokens, -// so to obtain a new token, users must explicitly re-authenticate. To access the -// OIDC flow that supports token refresh and doesn’t require re-authentication, -// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with -// support for OIDC token refresh and configurable IAM Identity Center session -// durations. For more information, see [Configure Amazon Web Services access portal session duration]. -// -// - The access tokens provided by this service grant access to all Amazon Web -// Services account entitlements assigned to an IAM Identity Center user, not just -// a particular application. -// -// - The documentation in this guide does not describe the mechanism to convert -// the access token into Amazon Web Services Auth (“sigv4”) credentials for use -// with IAM-protected Amazon Web Services service endpoints. For more information, -// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide. -// -// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity -// Center User Guide. -// -// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html -// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html -// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628 -// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html -package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go deleted file mode 100644 index 6feea0c9fec4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ /dev/null @@ -1,556 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. -func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "sso-oauth" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. -// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. -func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws-us-gov" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json deleted file mode 100644 index 35f180975a8c..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_CreateToken.go", - "api_op_CreateTokenWithIAM.go", - "api_op_RegisterClient.go", - "api_op_StartDeviceAuthorization.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "sra_operation_order_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.22", - "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go deleted file mode 100644 index d0758f943579..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ssooidc - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.34.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go deleted file mode 100644 index ba7b4f9eb01d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ /dev/null @@ -1,597 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver SSO OIDC endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsEusc *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.af-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "af-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-northeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-northeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-4.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-4", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-5.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-5", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ca-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-central-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-3", - }, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.il-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "il-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.me-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.me-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.sa-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "sa-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{ - Hostname: "oidc.cn-north-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{ - Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - { - ID: "aws-eusc", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsEusc, - IsRegionalized: true, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go deleted file mode 100644 index f35f3d5a3123..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ /dev/null @@ -1,239 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. - TracerProvider tracing.TracerProvider - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // Client registry of operation interceptors. - Interceptors smithyhttp.InterceptorRegistry - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme - - // Priority list of preferred auth scheme names (e.g. sigv4a). - AuthSchemePreference []string -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - to.Interceptors = o.Interceptors.Copy() - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go deleted file mode 100644 index 1ad103d1ed88..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ /dev/null @@ -1,512 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "bytes" - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - smithyjson "github.com/aws/smithy-go/encoding/json" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type awsRestjson1_serializeOpCreateToken struct { -} - -func (*awsRestjson1_serializeOpCreateToken) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/token") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.ClientSecret != nil { - ok := object.Key("clientSecret") - ok.String(*v.ClientSecret) - } - - if v.Code != nil { - ok := object.Key("code") - ok.String(*v.Code) - } - - if v.CodeVerifier != nil { - ok := object.Key("codeVerifier") - ok.String(*v.CodeVerifier) - } - - if v.DeviceCode != nil { - ok := object.Key("deviceCode") - ok.String(*v.DeviceCode) - } - - if v.GrantType != nil { - ok := object.Key("grantType") - ok.String(*v.GrantType) - } - - if v.RedirectUri != nil { - ok := object.Key("redirectUri") - ok.String(*v.RedirectUri) - } - - if v.RefreshToken != nil { - ok := object.Key("refreshToken") - ok.String(*v.RefreshToken) - } - - if v.Scope != nil { - ok := object.Key("scope") - if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { - return err - } - } - - return nil -} - -type awsRestjson1_serializeOpCreateTokenWithIAM struct { -} - -func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateTokenWithIAMInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/token?aws_iam=t") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Assertion != nil { - ok := object.Key("assertion") - ok.String(*v.Assertion) - } - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.Code != nil { - ok := object.Key("code") - ok.String(*v.Code) - } - - if v.CodeVerifier != nil { - ok := object.Key("codeVerifier") - ok.String(*v.CodeVerifier) - } - - if v.GrantType != nil { - ok := object.Key("grantType") - ok.String(*v.GrantType) - } - - if v.RedirectUri != nil { - ok := object.Key("redirectUri") - ok.String(*v.RedirectUri) - } - - if v.RefreshToken != nil { - ok := object.Key("refreshToken") - ok.String(*v.RefreshToken) - } - - if v.RequestedTokenType != nil { - ok := object.Key("requestedTokenType") - ok.String(*v.RequestedTokenType) - } - - if v.Scope != nil { - ok := object.Key("scope") - if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { - return err - } - } - - if v.SubjectToken != nil { - ok := object.Key("subjectToken") - ok.String(*v.SubjectToken) - } - - if v.SubjectTokenType != nil { - ok := object.Key("subjectTokenType") - ok.String(*v.SubjectTokenType) - } - - return nil -} - -type awsRestjson1_serializeOpRegisterClient struct { -} - -func (*awsRestjson1_serializeOpRegisterClient) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*RegisterClientInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/client/register") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientName != nil { - ok := object.Key("clientName") - ok.String(*v.ClientName) - } - - if v.ClientType != nil { - ok := object.Key("clientType") - ok.String(*v.ClientType) - } - - if v.EntitledApplicationArn != nil { - ok := object.Key("entitledApplicationArn") - ok.String(*v.EntitledApplicationArn) - } - - if v.GrantTypes != nil { - ok := object.Key("grantTypes") - if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil { - return err - } - } - - if v.IssuerUrl != nil { - ok := object.Key("issuerUrl") - ok.String(*v.IssuerUrl) - } - - if v.RedirectUris != nil { - ok := object.Key("redirectUris") - if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil { - return err - } - } - - if v.Scopes != nil { - ok := object.Key("scopes") - if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { - return err - } - } - - return nil -} - -type awsRestjson1_serializeOpStartDeviceAuthorization struct { -} - -func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*StartDeviceAuthorizationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/device_authorization") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.ClientSecret != nil { - ok := object.Key("clientSecret") - ok.String(*v.ClientSecret) - } - - if v.StartUrl != nil { - ok := object.Key("startUrl") - ok.String(*v.StartUrl) - } - - return nil -} - -func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go deleted file mode 100644 index 2cfe7b48fed6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ /dev/null @@ -1,428 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// You do not have sufficient access to perform this action. -type AccessDeniedException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *AccessDeniedException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *AccessDeniedException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "AccessDeniedException" - } - return *e.ErrorCodeOverride -} -func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a request to authorize a client with an access user session -// token is pending. -type AuthorizationPendingException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *AuthorizationPendingException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *AuthorizationPendingException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *AuthorizationPendingException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "AuthorizationPendingException" - } - return *e.ErrorCodeOverride -} -func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the token issued by the service is expired and is no longer -// valid. -type ExpiredTokenException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *ExpiredTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ExpiredTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ExpiredTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ExpiredTokenException" - } - return *e.ErrorCodeOverride -} -func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that an error from the service occurred while trying to process a -// request. -type InternalServerException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InternalServerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InternalServerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InternalServerException" - } - return *e.ErrorCodeOverride -} -func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } - -// Indicates that the clientId or clientSecret in the request is invalid. For -// example, this can occur when a client sends an incorrect clientId or an expired -// clientSecret . -type InvalidClientException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidClientException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidClientException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidClientException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidClientException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client information sent in the request during registration -// is invalid. -type InvalidClientMetadataException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidClientMetadataException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidClientMetadataException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidClientMetadataException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidClientMetadataException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a request contains an invalid grant. This can occur if a client -// makes a CreateTokenrequest with an invalid grant type. -type InvalidGrantException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidGrantException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidGrantException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidGrantException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidGrantException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that one or more redirect URI in the request is not supported for -// this operation. -type InvalidRedirectUriException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidRedirectUriException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRedirectUriException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRedirectUriException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRedirectUriException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that something is wrong with the input to the request. For example, a -// required parameter might be missing or out of range. -type InvalidRequestException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a token provided as input to the request was issued by and is -// only usable by calling IAM Identity Center endpoints in another region. -type InvalidRequestRegionException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - Endpoint *string - Region *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestRegionException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestRegionException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestRegionException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestRegionException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the scope provided in the request is invalid. -type InvalidScopeException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidScopeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidScopeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidScopeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidScopeException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client is making the request too frequently and is more than -// the service can handle. -type SlowDownException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *SlowDownException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *SlowDownException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *SlowDownException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "SlowDownException" - } - return *e.ErrorCodeOverride -} -func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client is not currently authorized to make the request. This -// can happen when a clientId is not issued for a public client. -type UnauthorizedClientException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *UnauthorizedClientException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnauthorizedClientException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnauthorizedClientException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnauthorizedClientException" - } - return *e.ErrorCodeOverride -} -func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the grant type in the request is not supported by the service. -type UnsupportedGrantTypeException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedGrantTypeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedGrantTypeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedGrantTypeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedGrantTypeException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go deleted file mode 100644 index 2e8f3ea031c7..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go +++ /dev/null @@ -1,22 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" -) - -// This structure contains Amazon Web Services-specific parameter extensions for -// the token endpoint responses and includes the identity context. -type AwsAdditionalDetails struct { - - // STS context assertion that carries a user identifier to the Amazon Web Services - // service that it calls and can be used to obtain an identity-enhanced IAM role - // session. This value corresponds to the sts:identity_context claim in the ID - // token. - IdentityContext *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go deleted file mode 100644 index 9c17e4c8e184..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go +++ /dev/null @@ -1,184 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpCreateToken struct { -} - -func (*validateOpCreateToken) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateTokenInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateTokenInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateTokenWithIAM struct { -} - -func (*validateOpCreateTokenWithIAM) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateTokenWithIAMInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateTokenWithIAMInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpRegisterClient struct { -} - -func (*validateOpRegisterClient) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*RegisterClientInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpRegisterClientInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpStartDeviceAuthorization struct { -} - -func (*validateOpStartDeviceAuthorization) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*StartDeviceAuthorizationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpStartDeviceAuthorizationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) -} - -func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After) -} - -func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) -} - -func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) -} - -func validateOpCreateTokenInput(v *CreateTokenInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.ClientSecret == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) - } - if v.GrantType == nil { - invalidParams.Add(smithy.NewErrParamRequired("GrantType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.GrantType == nil { - invalidParams.Add(smithy.NewErrParamRequired("GrantType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpRegisterClientInput(v *RegisterClientInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} - if v.ClientName == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientName")) - } - if v.ClientType == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.ClientSecret == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) - } - if v.StartUrl == nil { - invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md deleted file mode 100644 index ca18a1e9f2c3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ /dev/null @@ -1,685 +0,0 @@ -# v1.38.0 (2025-08-21) - -* **Feature**: Remove incorrect endpoint tests -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.37.1 (2025-08-20) - -* **Bug Fix**: Remove unused deserialization code. - -# v1.37.0 (2025-08-11) - -* **Feature**: Add support for configuring per-service Options via callback on global config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.36.0 (2025-08-04) - -* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.35.1 (2025-07-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.35.0 (2025-07-28) - -* **Feature**: Add support for HTTP interceptors. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.34.1 (2025-07-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.34.0 (2025-06-17) - -* **Feature**: The AWS Security Token Service APIs AssumeRoleWithSAML and AssumeRoleWithWebIdentity can now be invoked without pre-configured AWS credentials in the SDK configuration. -* **Dependency Update**: Update to smithy-go v1.22.4. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.21 (2025-06-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.20 (2025-06-06) - -* No change notes available for this release. - -# v1.33.19 (2025-04-10) - -* No change notes available for this release. - -# v1.33.18 (2025-04-03) - -* No change notes available for this release. - -# v1.33.17 (2025-03-04.2) - -* **Bug Fix**: Add assurance test for operation order. - -# v1.33.16 (2025-02-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.15 (2025-02-18) - -* **Bug Fix**: Bump go version to 1.22 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.14 (2025-02-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.13 (2025-02-04) - -* No change notes available for this release. - -# v1.33.12 (2025-01-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.11 (2025-01-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.10 (2025-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade to smithy-go v1.22.2. - -# v1.33.9 (2025-01-17) - -* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. - -# v1.33.8 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.7 (2025-01-14) - -* No change notes available for this release. - -# v1.33.6 (2025-01-10) - -* **Documentation**: Fixed typos in the descriptions. - -# v1.33.5 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.4 (2025-01-08) - -* No change notes available for this release. - -# v1.33.3 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.2 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.1 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.0 (2024-11-14) - -* **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks. - -# v1.32.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.4 (2024-10-03) - -* No change notes available for this release. - -# v1.31.3 (2024-09-27) - -* No change notes available for this release. - -# v1.31.2 (2024-09-25) - -* No change notes available for this release. - -# v1.31.1 (2024-09-23) - -* No change notes available for this release. - -# v1.31.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.8 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.30.7 (2024-09-04) - -* No change notes available for this release. - -# v1.30.6 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.5 (2024-08-22) - -* No change notes available for this release. - -# v1.30.4 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.3 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.2 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.29.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.13 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.12 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.11 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.10 (2024-05-23) - -* No change notes available for this release. - -# v1.28.9 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.8 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.7 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.28.6 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.5 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.4 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.3 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.2 (2024-03-04) - -* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.27.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.27.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.7 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2023-12-20) - -* No change notes available for this release. - -# v1.26.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.26.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. -* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication. - -# v1.26.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Documentation**: Documentation updates for AWS Security Token Service. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.5 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.25.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.3 (2023-11-17) - -* **Documentation**: API updates for the AWS Security Token Service - -# v1.25.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2023-10-02) - -* **Feature**: STS API updates for assumeRole - -# v1.22.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.21.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.1 (2023-08-01) - -* No change notes available for this release. - -# v1.21.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-07-25) - -* **Feature**: API updates for the AWS Security Token Service - -# v1.19.3 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.2 (2023-06-15) - -* No change notes available for this release. - -# v1.19.1 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2023-05-08) - -* **Feature**: Documentation updates for AWS Security Token Service. - -# v1.18.11 (2023-05-04) - -* No change notes available for this release. - -# v1.18.10 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.9 (2023-04-10) - -* No change notes available for this release. - -# v1.18.8 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.18.4 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. - -# v1.18.2 (2023-01-25) - -* **Documentation**: Doc only change to update wording in a key topic - -# v1.18.1 (2023-01-23) - -* No change notes available for this release. - -# v1.18.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.17.7 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2022-11-22) - -* No change notes available for this release. - -# v1.17.4 (2022-11-17) - -* **Documentation**: Documentation updates for AWS Security Token Service. - -# v1.17.3 (2022-11-16) - -* No change notes available for this release. - -# v1.17.2 (2022-11-10) - -* No change notes available for this release. - -# v1.17.1 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2022-10-21) - -* **Feature**: Add presign functionality for sts:AssumeRole operation -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.19 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.18 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.17 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2022-08-30) - -* No change notes available for this release. - -# v1.16.14 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2022-05-16) - -* **Documentation**: Documentation updates for AWS Security Token Service. - -# v1.16.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: Updated service client model to latest release. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2021-12-21) - -* **Feature**: Updated to latest service endpoints - -# v1.11.1 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-11-30) - -* **Feature**: API client updated - -# v1.10.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-11-12) - -* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. - -# v1.9.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-10-21) - -* **Feature**: API client updated -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.2 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-07-15) - -* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* **Documentation**: Updated service model to latest revision. -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-06-25) - -* **Feature**: API client updated -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go deleted file mode 100644 index 6658babc95f6..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ /dev/null @@ -1,1171 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/protocol/query" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "STS" -const ServiceAPIVersion = "2011-06-15" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sts") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sts") -} - -// Client provides the API client to make operations call for AWS Security Token -// Service. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - AuthSchemePreference: cfg.AuthSchemePreference, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveInterceptors(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, func(o *Options) { - for _, opt := range cfg.ServiceOptions { - opt(ServiceID, o) - } - for _, opt := range optFns { - opt(o) - } - }) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) - } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func resolveInterceptors(cfg aws.Config, o *Options) { - o.Interceptors = cfg.Interceptors.Copy() -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - middleware.FinalizeOutput, middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") - }) - if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -type setCredentialSourceMiddleware struct { - ua *awsmiddleware.RequestUserAgent - options Options -} - -func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } - -func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) - if !ok { - return next.HandleBuild(ctx, in) - } - providerSources := asProviderSource.ProviderSources() - for _, source := range providerSources { - m.ua.AddCredentialsSource(source) - } - return next.HandleBuild(ctx, in) -} - -func addCredentialSource(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - mw := setCredentialSourceMiddleware{ua: ua, options: options} - return stack.Build.Insert(&mw, "UserAgent", middleware.Before) -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -// HTTPPresignerV4 represents presigner interface used by presign url client -type HTTPPresignerV4 interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*v4.SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignOptions represents the presign client options -type PresignOptions struct { - - // ClientOptions are list of functional options to mutate client options used by - // the presign client. - ClientOptions []func(*Options) - - // Presigner is the presigner used by the presign url client - Presigner HTTPPresignerV4 -} - -func (o PresignOptions) copy() PresignOptions { - clientOptions := make([]func(*Options), len(o.ClientOptions)) - copy(clientOptions, o.ClientOptions) - o.ClientOptions = clientOptions - return o -} - -// WithPresignClientFromClientOptions is a helper utility to retrieve a function -// that takes PresignOption as input -func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { - return withPresignClientFromClientOptions(optFns).options -} - -type withPresignClientFromClientOptions []func(*Options) - -func (w withPresignClientFromClientOptions) options(o *PresignOptions) { - o.ClientOptions = append(o.ClientOptions, w...) -} - -// PresignClient represents the presign url client -type PresignClient struct { - client *Client - options PresignOptions -} - -// NewPresignClient generates a presign client using provided API Client and -// presign options -func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { - var options PresignOptions - for _, fn := range optFns { - fn(&options) - } - if len(options.ClientOptions) != 0 { - c = New(c.options, options.ClientOptions...) - } - - if options.Presigner == nil { - options.Presigner = newDefaultV4Signer(c.options) - } - - return &PresignClient{ - client: c, - options: options, - } -} - -func withNopHTTPClientAPIOption(o *Options) { - o.HTTPClient = smithyhttp.NopClient{} -} - -type presignContextPolyfillMiddleware struct { -} - -func (*presignContextPolyfillMiddleware) ID() string { - return "presignContextPolyfill" -} - -func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - schemeID := rscheme.Scheme.SchemeID() - - if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { - if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr) - } - } else if schemeID == "aws.auth#sigv4a" { - if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) - } - } - - return next.HandleFinalize(ctx, in) -} - -type presignConverter PresignOptions - -func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { - stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { - stack.Finalize.Remove((*retry.Attempt)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { - stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) - } - stack.Deserialize.Clear() - stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) - stack.Build.Remove("UserAgent") - if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { - return err - } - - pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - Presigner: c.Presigner, - LogSigning: options.ClientLogMode.IsSigning(), - }) - if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { - return err - } - if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { - return err - } - // convert request to a GET request - err = query.AddAsGetRequestMiddleware(stack) - if err != nil { - return err - } - err = presignedurlcust.AddAsIsPresigningMiddleware(stack) - if err != nil { - return err - } - return nil -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ - Interceptors: opts.Interceptors.BeforeRetryLoop, - }, "Retry", middleware.Before) -} - -func addInterceptAttempt(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ - BeforeAttempt: opts.Interceptors.BeforeAttempt, - AfterAttempt: opts.Interceptors.AfterAttempt, - }, "Retry", middleware.After) -} - -func addInterceptExecution(stack *middleware.Stack, opts Options) error { - return stack.Initialize.Add(&smithyhttp.InterceptExecution{ - BeforeExecution: opts.Interceptors.BeforeExecution, - AfterExecution: opts.Interceptors.AfterExecution, - }, middleware.Before) -} - -func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ - Interceptors: opts.Interceptors.BeforeSerialization, - }, "OperationSerializer", middleware.Before) -} - -func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error { - return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ - Interceptors: opts.Interceptors.AfterSerialization, - }, "OperationSerializer", middleware.After) -} - -func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ - Interceptors: opts.Interceptors.BeforeSigning, - }, "Signing", middleware.Before) -} - -func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error { - return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ - Interceptors: opts.Interceptors.AfterSigning, - }, "Signing", middleware.After) -} - -func addInterceptTransmit(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ - BeforeTransmit: opts.Interceptors.BeforeTransmit, - AfterTransmit: opts.Interceptors.AfterTransmit, - }, middleware.After) -} - -func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ - Interceptors: opts.Interceptors.BeforeDeserialization, - }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse) -} - -func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error { - return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ - Interceptors: opts.Interceptors.AfterDeserialization, - }, "OperationDeserializer", middleware.Before) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go deleted file mode 100644 index f3a93418fa01..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ /dev/null @@ -1,580 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources. These temporary credentials consist of an access -// key ID, a secret access key, and a security token. Typically, you use AssumeRole -// within your account or for cross-account access. For a comparison of AssumeRole -// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the -// IAM User Guide. -// -// # Permissions -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any Amazon Web Services service with the following exception: You -// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken -// API operations. -// -// (Optional) You can pass inline or managed session policies to this operation. -// You can pass a single JSON policy document to use as an inline session policy. -// You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use -// as managed session policies. The plaintext that you use for both inline and -// managed session policies can't exceed 2,048 characters. Passing policies to this -// operation returns new temporary credentials. The resulting session's permissions -// are the intersection of the role's identity-based policy and the session -// policies. You can use the role's temporary credentials in subsequent Amazon Web -// Services API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed by the -// identity-based policy of the role that is being assumed. For more information, -// see [Session Policies]in the IAM User Guide. -// -// When you create a role, you create two policies: a role trust policy that -// specifies who can assume the role, and a permissions policy that specifies what -// can be done with the role. You specify the trusted principal that is allowed to -// assume the role in the role trust policy. -// -// To assume a role from a different account, your Amazon Web Services account -// must be trusted by the role. The trust relationship is defined in the role's -// trust policy when the role is created. That trust policy states which accounts -// are allowed to delegate that access to users in the account. -// -// A user who wants to access a role in a different account must also have -// permissions that are delegated from the account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN of the -// role in the other account. -// -// To allow a user to assume a role in the same account, you can do either of the -// following: -// -// - Attach a policy to the user that allows the user to call AssumeRole (as long -// as the role's trust policy trusts the account). -// -// - Add the user as a principal directly in the role's trust policy. -// -// You can do either because the role’s trust policy acts as an IAM resource-based -// policy. When a resource-based policy grants access to a principal in the same -// account, no additional identity-based policy is required. For more information -// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide. -// -// # Tags -// -// (Optional) You can pass tag key-value pairs to your session. These tags are -// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM -// User Guide. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during role -// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. -// -// # Using MFA with AssumeRole -// -// (Optional) You can include multi-factor authentication (MFA) information when -// you call AssumeRole . This is useful for cross-account scenarios to ensure that -// the user that assumes the role has been authenticated with an Amazon Web -// Services MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication. If the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication might -// look like the following example. -// -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} -// -// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide. -// -// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode -// parameters. The SerialNumber value identifies the user's hardware or virtual -// MFA device. The TokenCode is the time-based one-time password (TOTP) that the -// MFA device produces. -// -// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { - if params == nil { - params = &AssumeRoleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleInput struct { - - // The Amazon Resource Name (ARN) of the role to assume. - // - // This member is required. - RoleArn *string - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role is - // assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the account - // that owns the role. The role session name is also used in the ARN of the assumed - // role principal. This means that subsequent cross-account API requests that use - // the temporary security credentials will expose the role session name to the - // external account in their CloudTrail logs. - // - // For security purposes, administrators can view this field in [CloudTrail logs] to help identify - // who performed an action in Amazon Web Services. Your administrator might require - // that you specify your user name as the session name when you assume the role. - // For more information, see [sts:RoleSessionName]sts:RoleSessionName . - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds - // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname - // - // This member is required. - RoleSessionName *string - - // The duration, in seconds, of the role session. The value specified can range - // from 900 seconds (15 minutes) up to the maximum session duration set for the - // role. The maximum session duration setting can have a value from 1 hour to 12 - // hours. If you specify a value higher than this setting or the administrator - // setting (whichever is lower), the operation fails. For example, if you specify a - // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. - // - // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API - // role session to a maximum of one hour. When you use the AssumeRole API - // operation to assume a role, you can specify the duration of your role session - // with the DurationSeconds parameter. You can specify a parameter value of up to - // 43200 seconds (12 hours), depending on the maximum session duration setting for - // your role. However, if you assume a role using role chaining and provide a - // DurationSeconds parameter value greater than one hour, the operation fails. To - // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request to - // the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. - // - // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration - // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html - DurationSeconds *int32 - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. A - // cross-account role is usually set up to trust everyone in an account. Therefore, - // the administrator of the trusting account might send an external ID to the - // administrator of the trusted account. That way, only someone with the ID can - // assume the role, rather than everyone in the account. For more information about - // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@:/- - // - // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html - ExternalId *string - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM - // User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // For more information about role session permissions, see [Session policies]. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the - // Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's - // identity-based policy and the session policies. You can use the role's temporary - // credentials in subsequent Amazon Web Services API calls to access resources in - // the account that owns the role. You cannot use session policies to grant more - // permissions than those allowed by the identity-based policy of the role that is - // being assumed. For more information, see [Session Policies]in the IAM User Guide. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - // A list of previously acquired trusted context assertions in the format of a - // JSON array. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. - // - // The following is an example of a ProvidedContext value that includes a single - // trusted context assertion and the ARN of the context provider from which the - // trusted context assertion was generated. - // - // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] - ProvidedContexts []types.ProvidedContext - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy of - // the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as - // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user ). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - SerialNumber *string - - // The source identity specified by the principal that is calling the AssumeRole - // operation. The source identity value persists across [chained role]sessions. - // - // You can require users to specify a source identity when they assume a role. You - // do this by using the [sts:SourceIdentity]sts:SourceIdentity condition key in a role trust policy. - // You can use source identity information in CloudTrail logs to determine who took - // actions with a role. You can use the aws:SourceIdentity condition key to - // further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the - // IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: +=,.@-. You cannot use a - // value that begins with the text aws: . This prefix is reserved for Amazon Web - // Services internal use. - // - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity - SourceIdentity *string - - // A list of session tags that you want to pass. Each session tag consists of a - // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions] - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters, and the values can’t exceed 256 - // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the role. When you do, session tags override a role tag with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This means - // that you cannot have separate Department and department tag keys. Assume that - // the role has the Department = Marketing tag and you pass the department = - // engineering session tag. Department and department are not saved as separate - // tags, and the session tag passed in the request takes precedence over the role - // tag. - // - // Additionally, if you used temporary credentials to perform this operation, the - // new session inherits any transitive session tags from the calling session. If - // you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the CloudTrail logs. For - // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide. - // - // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs - Tags []types.Tag - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA. (In other words, if the policy includes a condition that - // tests for MFA). If the role being assumed requires MFA and if the TokenCode - // value is missing or expired, the AssumeRole call returns an "access denied" - // error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string - - // A list of keys for session tags that you want to set as transitive. If you set - // a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. - // - // This parameter is optional. The transitive status of a session tag does not - // impact its packed binary size. - // - // If you choose not to specify a transitive tag key, then no tags are passed from - // this session to any subsequent sessions. - // - // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining - TransitiveTagKeys []string - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRole request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type AssumeRoleOutput struct { - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. For - // example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole . - AssumedRoleUser *types.AssumedRoleUser - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The source identity specified by the principal that is calling the AssumeRole - // operation. - // - // You can require users to specify a source identity when they assume a role. You - // do this by using the sts:SourceIdentity condition key in a role trust policy. - // You can use source identity information in CloudTrail logs to determine who took - // actions with a role. You can use the aws:SourceIdentity condition key to - // further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the - // IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - SourceIdentity *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRole", - } -} - -// PresignAssumeRole is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &AssumeRoleInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns, - c.client.addOperationAssumeRoleMiddlewares, - presignConverter(options).convertToPresignMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go deleted file mode 100644 index 9dcceec12a25..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ /dev/null @@ -1,488 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials for users who have been -// authenticated via a SAML authentication response. This operation provides a -// mechanism for tying an enterprise identity store or directory to role-based -// Amazon Web Services access without user-specific credentials or configuration. -// For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of an -// access key ID, a secret access key, and a security token. Applications can use -// these temporary security credentials to sign calls to Amazon Web Services -// services. -// -// # Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML -// authentication response's SessionNotOnOrAfter value, whichever is shorter. You -// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the -// maximum session duration setting for the role. This setting can have a value -// from 1 hour to 12 hours. To learn how to view the maximum value for your role, -// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console URL. -// For more information, see [Using IAM Roles]in the IAM User Guide. -// -// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one -// hour. When you use the AssumeRole API operation to assume a role, you can -// specify the duration of your role session with the DurationSeconds parameter. -// You can specify a parameter value of up to 43200 seconds (12 hours), depending -// on the maximum session duration setting for your role. However, if you assume a -// role using role chaining and provide a DurationSeconds parameter value greater -// than one hour, the operation fails. -// -// # Permissions -// -// The temporary security credentials created by AssumeRoleWithSAML can be used to -// make API calls to any Amazon Web Services service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a -// single JSON policy document to use as an inline session policy. You can also -// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed -// session policies. The plaintext that you use for both inline and managed session -// policies can't exceed 2,048 characters. Passing policies to this operation -// returns new temporary credentials. The resulting session's permissions are the -// intersection of the role's identity-based policy and the session policies. You -// can use the role's temporary credentials in subsequent Amazon Web Services API -// calls to access resources in the account that owns the role. You cannot use -// session policies to grant more permissions than those allowed by the -// identity-based policy of the role that is being assumed. For more information, -// see [Session Policies]in the IAM User Guide. -// -// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services -// security credentials. The identity of the caller is validated by using keys in -// the metadata document that is uploaded for the SAML provider entity for your -// identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The -// entry includes the value in the NameID element of the SAML assertion. We -// recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the -// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). -// -// # Tags -// -// (Optional) You can configure your IdP to pass attributes into your SAML -// assertion as session tags. Each session tag consists of a key name and an -// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User -// Guide. -// -// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed -// 128 characters and the values can’t exceed 256 characters. For these and -// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. -// -// An Amazon Web Services conversion compresses the passed inline session policy, -// managed policy ARNs, and session tags into a packed binary format that has a -// separate limit. Your request can fail for this limit even if your plaintext -// meets the other requirements. The PackedPolicySize response element indicates -// by percentage how close the policies and tags for your request are to the upper -// size limit. -// -// You can pass a session tag with the same key as a tag that is attached to the -// role. When you do, session tags override the role's tags with the same key. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during role -// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. -// -// # SAML Configuration -// -// Before your application can call AssumeRoleWithSAML , you must configure your -// SAML identity provider (IdP) to issue the claims required by Amazon Web -// Services. Additionally, you must use Identity and Access Management (IAM) to -// create a SAML provider entity in your Amazon Web Services account that -// represents your identity provider. You must also create an IAM role that -// specifies this SAML provider in its trust policy. -// -// For more information, see the following resources: -// -// [About SAML 2.0-based Federation] -// - in the IAM User Guide. -// -// [Creating SAML Identity Providers] -// - in the IAM User Guide. -// -// [Configuring a Relying Party and Claims] -// - in the IAM User Guide. -// -// [Creating a Role for SAML 2.0 Federation] -// - in the IAM User Guide. -// -// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session -// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html -// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length -// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html -// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html -// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining -// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html -// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { - if params == nil { - params = &AssumeRoleWithSAMLInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleWithSAMLOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleWithSAMLInput struct { - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the - // IdP. - // - // This member is required. - PrincipalArn *string - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // This member is required. - RoleArn *string - - // The base64 encoded SAML authentication response provided by the IdP. - // - // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide. - // - // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html - // - // This member is required. - SAMLAssertion *string - - // The duration, in seconds, of the role session. Your role session lasts for the - // duration that you specify for the DurationSeconds parameter, or until the time - // specified in the SAML authentication response's SessionNotOnOrAfter value, - // whichever is shorter. You can provide a DurationSeconds value from 900 seconds - // (15 minutes) up to the maximum session duration setting for the role. This - // setting can have a value from 1 hour to 12 hours. If you specify a value higher - // than this setting, the operation fails. For example, if you specify a session - // duration of 12 hours, but your administrator set the maximum session duration to - // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request to - // the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. - // - // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session - // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM - // User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // For more information about role session permissions, see [Session policies]. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the - // Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's - // identity-based policy and the session policies. You can use the role's temporary - // credentials in subsequent Amazon Web Services API calls to access resources in - // the account that owns the role. You cannot use session policies to grant more - // permissions than those allowed by the identity-based policy of the role that is - // being assumed. For more information, see [Session Policies]in the IAM User Guide. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type AssumeRoleWithSAMLOutput struct { - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *types.AssumedRoleUser - - // The value of the Recipient attribute of the SubjectConfirmationData element of - // the SAML assertion. - Audience *string - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // The value of the Issuer element of the SAML assertion. - Issuer *string - - // A hash value based on the concatenation of the following: - // - // - The Issuer response value. - // - // - The Amazon Web Services account ID. - // - // - The friendly name (the last part of the ARN) of the SAML provider in IAM. - // - // The combination of NameQualifier and Subject can be used to uniquely identify a - // user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) - NameQualifier *string - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The value in the SourceIdentity attribute in the SAML assertion. The source - // identity value persists across [chained role]sessions. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with that - // user. After the source identity is set, the value cannot be changed. It is - // present in the request for all actions that are taken by the role and persists - // across [chained role]sessions. You can configure your SAML identity provider to use an - // attribute associated with your users, like user name or email, as the source - // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to - // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in - // the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - SourceIdentity *string - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient or - // persistent . - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format , - // that prefix is removed. For example, - // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . - // If the format includes any other prefix, the format is returned with no - // modifications. - SubjectType *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoleWithSAML", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go deleted file mode 100644 index 5975a0cdee86..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ /dev/null @@ -1,508 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials for users who have been -// authenticated in a mobile or web application with a web identity provider. -// Example providers include the OAuth 2.0 providers Login with Amazon and -// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities]. -// -// For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also -// supply the user with a consistent identity throughout the lifetime of an -// application. -// -// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web -// Services security credentials. Therefore, you can distribute an application (for -// example, on mobile devices) that requests temporary security credentials without -// including long-term Amazon Web Services credentials in the application. You also -// don't need to deploy server-based proxy services that use long-term Amazon Web -// Services credentials. Instead, the identity of the caller is validated by using -// a token from the web identity provider. For a comparison of -// AssumeRoleWithWebIdentity with the other API operations that produce temporary -// credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to Amazon Web Services service API -// operations. -// -// # Session Duration -// -// By default, the temporary security credentials created by -// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional -// DurationSeconds parameter to specify the duration of your session. You can -// provide a value from 900 seconds (15 minutes) up to the maximum session duration -// setting for the role. This setting can have a value from 1 hour to 12 hours. To -// learn how to view the maximum value for your role, see [Update the maximum session duration for a role]in the IAM User Guide. -// The maximum session duration limit applies when you use the AssumeRole* API -// operations or the assume-role* CLI commands. However the limit does not apply -// when you use those operations to create a console URL. For more information, see -// [Using IAM Roles]in the IAM User Guide. -// -// # Permissions -// -// The temporary security credentials created by AssumeRoleWithWebIdentity can be -// used to make API calls to any Amazon Web Services service with the following -// exception: you cannot call the STS GetFederationToken or GetSessionToken API -// operations. -// -// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a -// single JSON policy document to use as an inline session policy. You can also -// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed -// session policies. The plaintext that you use for both inline and managed session -// policies can't exceed 2,048 characters. Passing policies to this operation -// returns new temporary credentials. The resulting session's permissions are the -// intersection of the role's identity-based policy and the session policies. You -// can use the role's temporary credentials in subsequent Amazon Web Services API -// calls to access resources in the account that owns the role. You cannot use -// session policies to grant more permissions than those allowed by the -// identity-based policy of the role that is being assumed. For more information, -// see [Session Policies]in the IAM User Guide. -// -// # Tags -// -// (Optional) You can configure your IdP to pass attributes into your web identity -// token as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide. -// -// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed -// 128 characters and the values can’t exceed 256 characters. For these and -// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. -// -// An Amazon Web Services conversion compresses the passed inline session policy, -// managed policy ARNs, and session tags into a packed binary format that has a -// separate limit. Your request can fail for this limit even if your plaintext -// meets the other requirements. The PackedPolicySize response element indicates -// by percentage how close the policies and tags for your request are to the upper -// size limit. -// -// You can pass a session tag with the same key as a tag that is attached to the -// role. When you do, the session tag overrides the role tag with the same key. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during role -// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. -// -// # Identities -// -// Before your application can call AssumeRoleWithWebIdentity , you must have an -// identity token from a supported identity provider and create a role that the -// application can assume. The role that your application assumes must trust the -// identity provider that is associated with the identity token. In other words, -// the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail -// logs. The entry includes the [Subject]of the provided web identity token. We recommend -// that you avoid using any personally identifiable information (PII) in this -// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. -// -// For more information about how to use OIDC federation and the -// AssumeRoleWithWebIdentity API, see the following resources: -// -// [Using Web Identity Federation API Operations for Mobile Apps] -// - and [Federation Through a Web-based Identity Provider]. -// -// [Amazon Web Services SDK for iOS Developer Guide] -// - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the -// identity providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ -// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ -// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length -// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html -// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity -// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -// [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration -// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html -// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes -func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { - if params == nil { - params = &AssumeRoleWithWebIdentityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleWithWebIdentityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleWithWebIdentityInput struct { - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles]. - // The trust policies of these roles must accept the cognito-identity.amazonaws.com - // service principal and must contain the cognito-identity.amazonaws.com:aud - // condition key to restrict role assumption to users from your intended identity - // pools. A policy that trusts Amazon Cognito identity pools without this condition - // creates a risk that a user from an unintended identity pool can assume the role. - // For more information, see [Trust policies for IAM roles in Basic (Classic) authentication]in the Amazon Cognito Developer Guide. - // - // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html - // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies - // - // This member is required. - RoleArn *string - - // An identifier for the assumed role session. Typically, you pass the name or - // identifier that is associated with the user who is using your application. That - // way, the temporary security credentials that your application will use are - // associated with that user. This session name is included as part of the ARN and - // assumed role ID in the AssumedRoleUser response element. - // - // For security purposes, administrators can view this field in [CloudTrail logs] to help identify - // who performed an action in Amazon Web Services. Your administrator might require - // that you specify your user name as the session name when you assume the role. - // For more information, see [sts:RoleSessionName]sts:RoleSessionName . - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds - // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname - // - // This member is required. - RoleSessionName *string - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the - // identity provider. Your application must get this token by authenticating the - // user who is using your application with a web identity provider before the - // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token - // must be formatted as either an integer or a long integer. Tokens must be signed - // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or - // ES512). - // - // This member is required. - WebIdentityToken *string - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify a - // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request to - // the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. - // - // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session - // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM - // User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // For more information about role session permissions, see [Session policies]. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the - // Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's - // identity-based policy and the session policies. You can use the role's temporary - // credentials in subsequent Amazon Web Services API calls to access resources in - // the account that owns the role. You cannot use session policies to grant more - // permissions than those allowed by the identity-based policy of the role that is - // being assumed. For more information, see [Session Policies]in the IAM User Guide. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - // The fully qualified host component of the domain name of the OAuth 2.0 identity - // provider. Do not specify this value for an OpenID Connect identity provider. - // - // Currently www.amazon.com and graph.facebook.com are the only supported identity - // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type AssumeRoleWithWebIdentityOutput struct { - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. For - // example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole . - AssumedRoleUser *types.AssumedRoleUser - - // The intended audience (also known as client ID) of the web identity token. This - // is traditionally the client identifier issued to the application that requested - // the web identity token. - Audience *string - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed in - // the AssumeRoleWithWebIdentity request. - Provider *string - - // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with that - // user. After the source identity is set, the value cannot be changed. It is - // present in the request for all actions that are taken by the role and persists - // across [chained role]sessions. You can configure your identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON - // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon - // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles] - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html - SourceIdentity *string - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with the - // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user - // and the application that acquired the WebIdentityToken (pairwise identifier). - // For OpenID Connect ID tokens, this field contains the value returned by the - // identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoleWithWebIdentity", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go deleted file mode 100644 index 571f06728a5b..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go +++ /dev/null @@ -1,253 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of short term credentials you can use to perform privileged tasks -// on a member account in your organization. -// -// Before you can launch a privileged session, you must have centralized root -// access in your organization. For steps to enable this feature, see [Centralize root access for member accounts]in the IAM -// User Guide. -// -// The STS global endpoint is not supported for AssumeRoot. You must send this -// request to a Regional STS endpoint. For more information, see [Endpoints]. -// -// You can track AssumeRoot in CloudTrail logs to determine what actions were -// performed in a session. For more information, see [Track privileged tasks in CloudTrail]in the IAM User Guide. -// -// [Endpoints]: https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html#sts-endpoints -// [Track privileged tasks in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-track-privileged-tasks.html -// [Centralize root access for member accounts]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-enable-root-access.html -func (c *Client) AssumeRoot(ctx context.Context, params *AssumeRootInput, optFns ...func(*Options)) (*AssumeRootOutput, error) { - if params == nil { - params = &AssumeRootInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRoot", params, optFns, c.addOperationAssumeRootMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRootOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRootInput struct { - - // The member account principal ARN or account ID. - // - // This member is required. - TargetPrincipal *string - - // The identity based policy that scopes the session to the privileged tasks that - // can be performed. You can use one of following Amazon Web Services managed - // policies to scope root session actions. - // - // [IAMAuditRootUserCredentials] - // - // [IAMCreateRootUserPassword] - // - // [IAMDeleteRootUserCredentials] - // - // [S3UnlockBucketPolicy] - // - // [SQSUnlockQueuePolicy] - // - // [IAMDeleteRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMDeleteRootUserCredentials - // [IAMCreateRootUserPassword]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMCreateRootUserPassword - // [IAMAuditRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMAuditRootUserCredentials - // [S3UnlockBucketPolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-S3UnlockBucketPolicy - // [SQSUnlockQueuePolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-SQSUnlockQueuePolicy - // - // This member is required. - TaskPolicyArn *types.PolicyDescriptorType - - // The duration, in seconds, of the privileged session. The value can range from 0 - // seconds up to the maximum session duration of 900 seconds (15 minutes). If you - // specify a value higher than this setting, the operation fails. - // - // By default, the value is set to 900 seconds. - DurationSeconds *int32 - - noSmithyDocumentSerde -} - -type AssumeRootOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // The source identity specified by the principal that is calling the AssumeRoot - // operation. - // - // You can use the aws:SourceIdentity condition key to control access based on the - // value of source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - SourceIdentity *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoot{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoot{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoot"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpAssumeRootValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoot(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoot(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoot", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go deleted file mode 100644 index 786bac89b8ac..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ /dev/null @@ -1,225 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Decodes additional information about the authorization status of a request from -// an encoded message returned in response to an Amazon Web Services request. -// -// For example, if a user is not authorized to perform an operation that he or she -// has requested, the request returns a Client.UnauthorizedOperation response (an -// HTTP 403 response). Some Amazon Web Services operations additionally return an -// encoded message that can provide details about this authorization failure. -// -// Only certain Amazon Web Services operations return an encoded authorization -// message. The documentation for an individual operation indicates whether that -// operation returns an encoded message in addition to returning an HTTP code. -// -// The message is encoded because the details of the authorization status can -// contain privileged information that the user who requested the operation should -// not see. To decode an authorization status message, a user must be granted -// permissions through an IAM [policy]to request the DecodeAuthorizationMessage ( -// sts:DecodeAuthorizationMessage ) action. -// -// The decoded message includes the following type of information: -// -// - Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User -// Guide. -// -// - The principal who made the request. -// -// - The requested action. -// -// - The requested resource. -// -// - The values of condition keys in the context of the user's request. -// -// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow -// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html -func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { - if params == nil { - params = &DecodeAuthorizationMessageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DecodeAuthorizationMessageOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DecodeAuthorizationMessageInput struct { - - // The encoded message that was returned with the response. - // - // This member is required. - EncodedMessage *string - - noSmithyDocumentSerde -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an Amazon -// Web Services request. -type DecodeAuthorizationMessageOutput struct { - - // The API returns a response with the decoded message. - DecodedMessage *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DecodeAuthorizationMessage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go deleted file mode 100644 index 6c1f878981cf..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ /dev/null @@ -1,216 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the account identifier for the specified access key ID. -// -// Access keys consist of two parts: an access key ID (for example, -// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example, -// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access -// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide. -// -// When you pass an access key ID to this operation, it returns the ID of the -// Amazon Web Services account to which the keys belong. Access key IDs beginning -// with AKIA are long-term credentials for an IAM user or the Amazon Web Services -// account root user. Access key IDs beginning with ASIA are temporary credentials -// that are created using STS operations. If the account in the response belongs to -// you, you can sign in as the root user and review your root user access keys. -// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who -// requested the temporary credentials for an ASIA access key, view the STS events -// in your [CloudTrail logs]in the IAM User Guide. -// -// This operation does not indicate the state of the access key. The key might be -// active, inactive, or deleted. Active keys might not have permissions to perform -// an operation. Providing a deleted access key might return an error that the key -// doesn't exist. -// -// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html -// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html -// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html -func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { - if params == nil { - params = &GetAccessKeyInfoInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetAccessKeyInfoOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetAccessKeyInfoInput struct { - - // The identifier of an access key. - // - // This parameter allows (through its regex pattern) a string of characters that - // can consist of any upper- or lowercase letter or digit. - // - // This member is required. - AccessKeyId *string - - noSmithyDocumentSerde -} - -type GetAccessKeyInfoOutput struct { - - // The number used to identify the Amazon Web Services account. - Account *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetAccessKeyInfo", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go deleted file mode 100644 index 7d0653398b31..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ /dev/null @@ -1,228 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns details about the IAM user or role whose credentials are used to call -// the operation. -// -// No permissions are required to perform this operation. If an administrator -// attaches a policy to your identity that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when access is denied. -// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide. -// -// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa -func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { - if params == nil { - params = &GetCallerIdentityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetCallerIdentityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetCallerIdentityInput struct { - noSmithyDocumentSerde -} - -// Contains the response to a successful GetCallerIdentity request, including information about the -// entity making the request. -type GetCallerIdentityOutput struct { - - // The Amazon Web Services account ID number of the account that owns or contains - // the calling entity. - Account *string - - // The Amazon Web Services ARN associated with the calling entity. - Arn *string - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. The values returned are those listed in - // the aws:userid column in the [Principal table]found on the Policy Variables reference page in - // the IAM User Guide. - // - // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable - UserId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetCallerIdentity", - } -} - -// PresignGetCallerIdentity is used to generate a presigned HTTP Request which -// contains presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &GetCallerIdentityInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, - c.client.addOperationGetCallerIdentityMiddlewares, - presignConverter(options).convertToPresignMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go deleted file mode 100644 index 1c2f28e519c3..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ /dev/null @@ -1,429 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials (consisting of an access key -// ID, a secret access key, and a security token) for a user. A typical use is in a -// proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. -// -// You must call the GetFederationToken operation using the long-term security -// credentials of an IAM user. As a result, this call is appropriate in contexts -// where those credentials can be safeguarded, usually in a server-based -// application. For a comparison of GetFederationToken with the other API -// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// Although it is possible to call GetFederationToken using the security -// credentials of an Amazon Web Services account root user rather than an IAM user -// that you create for the purpose of a proxy application, we do not recommend it. -// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide. -// -// You can create a mobile-based or browser-based app that can authenticate users -// using a web identity provider like Login with Amazon, Facebook, Google, or an -// OpenID Connect-compatible identity provider. In this case, we recommend that you -// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User -// Guide. -// -// # Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by -// using the root user credentials have a maximum duration of 3,600 seconds (1 -// hour). -// -// # Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service with the following exceptions: -// -// - You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. This limitation does not apply to console sessions. -// -// - You cannot call any STS operations except GetCallerIdentity . -// -// You can use temporary credentials for single sign-on (SSO) to the console. -// -// You must pass an inline or managed [session policy] to this operation. You can pass a single -// JSON policy document to use as an inline session policy. You can also specify up -// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session -// policies. The plaintext that you use for both inline and managed session -// policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM user -// policies and the session policies that you pass. This gives you a way to further -// restrict the permissions for a federated user. You cannot use session policies -// to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For -// information about using GetFederationToken to create temporary security -// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker]. -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session in the -// Principal element of the policy, the session has the permissions allowed by the -// policy. These permissions are granted in addition to the permissions granted by -// the session policies. -// -// # Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User -// Guide. -// -// You can create a mobile-based or browser-based app that can authenticate users -// using a web identity provider like Login with Amazon, Facebook, Google, or an -// OpenID Connect-compatible identity provider. In this case, we recommend that you -// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User -// Guide. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// Tag key–value pairs are not case sensitive, but case is preserved. This means -// that you cannot have separate Department and department tag keys. Assume that -// the user that you are federating has the Department = Marketing tag and you -// pass the department = engineering session tag. Department and department are -// not saved as separate tags, and the session tag passed in the request takes -// precedence over the user tag. -// -// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity -// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Amazon Cognito]: http://aws.amazon.com/cognito/ -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken -// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { - if params == nil { - params = &GetFederationTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetFederationTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetFederationTokenInput struct { - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob ). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon S3 - // bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // This member is required. - Name *string - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using root user credentials are restricted to a maximum of 3,600 seconds (one - // hour). If the specified duration is longer than one hour, the session obtained - // by using root user credentials defaults to one hour. - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // You must pass an inline or managed [session policy] to this operation. You can pass a single - // JSON policy document to use as an inline session policy. You can also specify up - // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session - // policies. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection of - // the IAM user policies and the session policies that you pass. This gives you a - // way to further restrict the permissions for a federated user. You cannot use - // session policies to grant more permissions than those that are defined in the - // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User - // Guide. - // - // The resulting credentials can be used to access a resource that has a - // resource-based policy. If that policy specifically references the federated user - // session in the Principal element of the policy, the session has the permissions - // allowed by the policy. These permissions are granted in addition to the - // permissions that are granted by the session policies. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as a managed session policy. The policies must exist in the same account as - // the IAM user that is requesting federated access. - // - // You must pass an inline or managed [session policy] to this operation. You can pass a single - // JSON policy document to use as an inline session policy. You can also specify up - // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session - // policies. The plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. You can provide up to 10 managed policy - // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General - // Reference. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection of - // the IAM user policies and the session policies that you pass. This gives you a - // way to further restrict the permissions for a federated user. You cannot use - // session policies to grant more permissions than those that are defined in the - // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User - // Guide. - // - // The resulting credentials can be used to access a resource that has a - // resource-based policy. If that policy specifically references the federated user - // session in the Principal element of the policy, the session has the permissions - // allowed by the policy. These permissions are granted in addition to the - // permissions that are granted by the session policies. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - // A list of session tags. Each session tag consists of a key name and an - // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User - // Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters and the values can’t exceed 256 - // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the user you are federating. When you do, session tags override a user tag - // with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This means - // that you cannot have separate Department and department tag keys. Assume that - // the role has the Department = Marketing tag and you pass the department = - // engineering session tag. Department and department are not saved as separate - // tags, and the session tag passed in the request takes precedence over the role - // tag. - // - // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - Tags []types.Tag - - noSmithyDocumentSerde -} - -// Contains the response to a successful GetFederationToken request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type GetFederationTokenOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // Identifiers for the federated user associated with the credentials (such as - // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use - // the federated user's ARN in your resource-based policies, such as an Amazon S3 - // bucket policy. - FederatedUser *types.FederatedUser - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetFederationToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go deleted file mode 100644 index 25604699009d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ /dev/null @@ -1,275 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary credentials for an Amazon Web Services account or -// IAM user. The credentials consist of an access key ID, a secret access key, and -// a security token. Typically, you use GetSessionToken if you want to use MFA to -// protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances . -// -// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is -// associated with their MFA device. Using the temporary security credentials that -// the call returns, IAM users can then make programmatic calls to API operations -// that require MFA authentication. An incorrect MFA code causes the API to return -// an access denied error. For a comparison of GetSessionToken with the other API -// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// No permissions are required for users to perform this operation. The purpose of -// the sts:GetSessionToken operation is to authenticate the user using MFA. You -// cannot use policies to control authentication operations. For more information, -// see [Permissions for GetSessionToken]in the IAM User Guide. -// -// # Session Duration -// -// The GetSessionToken operation must be called by using the long-term Amazon Web -// Services security credentials of an IAM user. Credentials that are created by -// IAM users are valid for the duration that you specify. This duration can range -// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours), -// with a default of 43,200 seconds (12 hours). Credentials based on account -// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1 -// hour), with a default of 1 hour. -// -// # Permissions -// -// The temporary security credentials created by GetSessionToken can be used to -// make API calls to any Amazon Web Services service with the following exceptions: -// -// - You cannot call any IAM API operations unless MFA authentication -// information is included in the request. -// -// - You cannot call any STS API except AssumeRole or GetCallerIdentity . -// -// The credentials that GetSessionToken returns are based on permissions -// associated with the IAM user whose credentials were used to call the operation. -// The temporary credentials have the same permissions as the IAM user. -// -// Although it is possible to call GetSessionToken using the security credentials -// of an Amazon Web Services account root user rather than an IAM user, we do not -// recommend it. If GetSessionToken is called using root user credentials, the -// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in -// the IAM User Guide -// -// For more information about using GetSessionToken to create temporary -// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. -// -// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html -// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken -// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { - if params == nil { - params = &GetSessionTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetSessionTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetSessionTokenInput struct { - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for - // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds - // (one hour). If the duration is longer than one hour, the session for Amazon Web - // Services account owners defaults to one hour. - DurationSeconds *int32 - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM user - // has a policy that requires MFA authentication. The value is either the serial - // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name - // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You - // can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@:/- - SerialNumber *string - - // The value provided by the MFA device, if MFA is required. If any policy - // requires the IAM user to submit an MFA code, specify this value. If MFA - // authentication is required, the user must provide a code when requesting a set - // of temporary security credentials. A user who fails to provide the code receives - // an "access denied" response when requesting resources that require MFA - // authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string - - noSmithyDocumentSerde -} - -// Contains the response to a successful GetSessionToken request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type GetSessionTokenOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addCredentialSource(stack, options); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addInterceptBeforeRetryLoop(stack, options); err != nil { - return err - } - if err = addInterceptAttempt(stack, options); err != nil { - return err - } - if err = addInterceptExecution(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSerialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterSerialization(stack, options); err != nil { - return err - } - if err = addInterceptBeforeSigning(stack, options); err != nil { - return err - } - if err = addInterceptAfterSigning(stack, options); err != nil { - return err - } - if err = addInterceptTransmit(stack, options); err != nil { - return err - } - if err = addInterceptBeforeDeserialization(stack, options); err != nil { - return err - } - if err = addInterceptAfterDeserialization(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetSessionToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go deleted file mode 100644 index 2a81b3fb19de..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ /dev/null @@ -1,351 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "slices" - "strings" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. -type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sts") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - sorted := sortAuthOptions(options, m.options.AuthSchemePreference) - for _, option := range sorted { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option { - byPriority := make([]*smithyauth.Option, 0, len(options)) - for _, prefName := range preferred { - for _, option := range options { - optName := option.SchemeID - if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { - optName = parts[1] - } - if prefName == optName { - byPriority = append(byPriority, option) - } - } - } - for _, option := range options { - if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool { - return o.SchemeID == option.SchemeID - }) { - byPriority = append(byPriority, option) - } - } - return byPriority -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go deleted file mode 100644 index a1ac917ec6a0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ /dev/null @@ -1,2710 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strconv" - "strings" -) - -type awsAwsquery_deserializeOpAssumeRole struct { -} - -func (*awsAwsquery_deserializeOpAssumeRole) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) - } - output := &AssumeRoleOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("AssumeRoleResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { -} - -func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) - } - output := &AssumeRoleWithSAMLOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("AssumeRoleWithSAMLResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("IDPRejectedClaim", errorCode): - return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) - - case strings.EqualFold("InvalidIdentityToken", errorCode): - return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) - - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { -} - -func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) - } - output := &AssumeRoleWithWebIdentityOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("IDPCommunicationError", errorCode): - return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) - - case strings.EqualFold("IDPRejectedClaim", errorCode): - return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) - - case strings.EqualFold("InvalidIdentityToken", errorCode): - return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) - - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpAssumeRoot struct { -} - -func (*awsAwsquery_deserializeOpAssumeRoot) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpAssumeRoot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoot(response, &metadata) - } - output := &AssumeRootOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("AssumeRootResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentAssumeRootOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorAssumeRoot(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { -} - -func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) - } - output := &DecodeAuthorizationMessageOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("DecodeAuthorizationMessageResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): - return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetAccessKeyInfo struct { -} - -func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) - } - output := &GetAccessKeyInfoOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetAccessKeyInfoResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetCallerIdentity struct { -} - -func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) - } - output := &GetCallerIdentityOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetCallerIdentityResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetFederationToken struct { -} - -func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata) - } - output := &GetFederationTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetFederationTokenResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("MalformedPolicyDocument", errorCode): - return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) - - case strings.EqualFold("PackedPolicyTooLarge", errorCode): - return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) - - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsquery_deserializeOpGetSessionToken struct { -} - -func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) - } - output := &GetSessionTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("GetSessionTokenResult") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) - if err != nil { - return err - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("RegionDisabledException", errorCode): - return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ExpiredTokenException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.IDPCommunicationErrorException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.IDPRejectedClaimException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidAuthorizationMessageException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidIdentityTokenException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.MalformedPolicyDocumentException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.PackedPolicyTooLargeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.RegionDisabledException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AssumedRoleUser - if *v == nil { - sv = &types.AssumedRoleUser{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Arn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Arn = ptr.String(xtv) - } - - case strings.EqualFold("AssumedRoleId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AssumedRoleId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Credentials - if *v == nil { - sv = &types.Credentials{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessKeyId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessKeyId = ptr.String(xtv) - } - - case strings.EqualFold("Expiration", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Expiration = ptr.Time(t) - } - - case strings.EqualFold("SecretAccessKey", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SecretAccessKey = ptr.String(xtv) - } - - case strings.EqualFold("SessionToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SessionToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ExpiredTokenException - if *v == nil { - sv = &types.ExpiredTokenException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.FederatedUser - if *v == nil { - sv = &types.FederatedUser{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Arn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Arn = ptr.String(xtv) - } - - case strings.EqualFold("FederatedUserId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.FederatedUserId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IDPCommunicationErrorException - if *v == nil { - sv = &types.IDPCommunicationErrorException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IDPRejectedClaimException - if *v == nil { - sv = &types.IDPRejectedClaimException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InvalidAuthorizationMessageException - if *v == nil { - sv = &types.InvalidAuthorizationMessageException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.InvalidIdentityTokenException - if *v == nil { - sv = &types.InvalidIdentityTokenException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.MalformedPolicyDocumentException - if *v == nil { - sv = &types.MalformedPolicyDocumentException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PackedPolicyTooLargeException - if *v == nil { - sv = &types.PackedPolicyTooLargeException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RegionDisabledException - if *v == nil { - sv = &types.RegionDisabledException{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *AssumeRoleOutput - if *v == nil { - sv = &AssumeRoleOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AssumedRoleUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("SourceIdentity", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SourceIdentity = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *AssumeRoleWithSAMLOutput - if *v == nil { - sv = &AssumeRoleWithSAMLOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AssumedRoleUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Audience", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Audience = ptr.String(xtv) - } - - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Issuer", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Issuer = ptr.String(xtv) - } - - case strings.EqualFold("NameQualifier", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NameQualifier = ptr.String(xtv) - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("SourceIdentity", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SourceIdentity = ptr.String(xtv) - } - - case strings.EqualFold("Subject", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Subject = ptr.String(xtv) - } - - case strings.EqualFold("SubjectType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SubjectType = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *AssumeRoleWithWebIdentityOutput - if *v == nil { - sv = &AssumeRoleWithWebIdentityOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AssumedRoleUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Audience", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Audience = ptr.String(xtv) - } - - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Provider", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Provider = ptr.String(xtv) - } - - case strings.EqualFold("SourceIdentity", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SourceIdentity = ptr.String(xtv) - } - - case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SubjectFromWebIdentityToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *AssumeRootOutput - if *v == nil { - sv = &AssumeRootOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("SourceIdentity", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SourceIdentity = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DecodeAuthorizationMessageOutput - if *v == nil { - sv = &DecodeAuthorizationMessageOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DecodedMessage", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DecodedMessage = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetAccessKeyInfoOutput - if *v == nil { - sv = &GetAccessKeyInfoOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Account", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Account = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetCallerIdentityOutput - if *v == nil { - sv = &GetCallerIdentityOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Account", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Account = ptr.String(xtv) - } - - case strings.EqualFold("Arn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Arn = ptr.String(xtv) - } - - case strings.EqualFold("UserId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UserId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetFederationTokenOutput - if *v == nil { - sv = &GetFederationTokenOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("FederatedUser", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PackedPolicySize", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PackedPolicySize = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetSessionTokenOutput - if *v == nil { - sv = &GetSessionTokenOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go deleted file mode 100644 index cbb19c7f6686..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package sts provides the API client, operations, and parameter types for AWS -// Security Token Service. -// -// # Security Token Service -// -// Security Token Service (STS) enables you to request temporary, -// limited-privilege credentials for users. This guide provides descriptions of the -// STS API. For more information about using this service, see [Temporary Security Credentials]. -// -// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html -package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go deleted file mode 100644 index dca2ce3599e4..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ /dev/null @@ -1,1136 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. -func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "sts" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. -// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string - - // Whether the global endpoint should be used, rather then the regional endpoint - // for us-east-1. - // - // Defaults to false if no value is - // provided. - // - // AWS::STS::UseGlobalEndpoint - UseGlobalEndpoint *bool -} - -// ValidateRequired validates required parameters are set. -func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - if p.UseGlobalEndpoint == nil { - return fmt.Errorf("parameter UseGlobalEndpoint is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - - if p.UseGlobalEndpoint == nil { - p.UseGlobalEndpoint = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - _UseGlobalEndpoint := *params.UseGlobalEndpoint - - if _UseGlobalEndpoint == true { - if !(params.Endpoint != nil) { - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == false { - if _UseDualStack == false { - if _Region == "ap-northeast-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-south-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-southeast-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-southeast-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "aws-global" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ca-central-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-central-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-north-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-3" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "sa-east-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-east-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-east-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-west-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-west-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws-us-gov" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - if _Region == "aws-global" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json deleted file mode 100644 index 86bb3b79be49..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", - "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_AssumeRole.go", - "api_op_AssumeRoleWithSAML.go", - "api_op_AssumeRoleWithWebIdentity.go", - "api_op_AssumeRoot.go", - "api_op_DecodeAuthorizationMessage.go", - "api_op_GetAccessKeyInfo.go", - "api_op_GetCallerIdentity.go", - "api_op_GetFederationToken.go", - "api_op_GetSessionToken.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "sra_operation_order_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.22", - "module": "github.com/aws/aws-sdk-go-v2/service/sts", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go deleted file mode 100644 index 931a5d81e11e..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package sts - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go deleted file mode 100644 index 3dfa51e5f4b2..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ /dev/null @@ -1,560 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver STS endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsEusc *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-east-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-7", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "aws-global", - }: endpoints.Endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "mx-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-2-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-west-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-2-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-eusc", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.eu", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsEusc, - IsRegionalized: true, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-iso-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-isob-east-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "eu-isoe-west-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-isof-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-isof-south-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.us-gov-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.us-gov-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go deleted file mode 100644 index f60b7d33815d..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ /dev/null @@ -1,239 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. - TracerProvider tracing.TracerProvider - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // Client registry of operation interceptors. - Interceptors smithyhttp.InterceptorRegistry - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme - - // Priority list of preferred auth scheme names (e.g. sigv4a). - AuthSchemePreference []string -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - to.Interceptors = o.Interceptors.Copy() - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go deleted file mode 100644 index 96b222136bf0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ /dev/null @@ -1,1005 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/query" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "path" -) - -type awsAwsquery_serializeOpAssumeRole struct { -} - -func (*awsAwsquery_serializeOpAssumeRole) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRole") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoleWithSAML struct { -} - -func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoleWithSAML") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { -} - -func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoleWithWebIdentity") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoot struct { -} - -func (*awsAwsquery_serializeOpAssumeRoot) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRootInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoot") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRootInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { -} - -func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("DecodeAuthorizationMessage") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetAccessKeyInfo struct { -} - -func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetAccessKeyInfoInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetAccessKeyInfo") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetCallerIdentity struct { -} - -func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetCallerIdentityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetCallerIdentity") - body.Key("Version").String("2011-06-15") - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetFederationToken struct { -} - -func (*awsAwsquery_serializeOpGetFederationToken) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetFederationTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetFederationToken") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetSessionToken struct { -} - -func (*awsAwsquery_serializeOpGetSessionToken) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetSessionTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetSessionToken") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { - object := value.Object() - _ = object - - if v.Arn != nil { - objectKey := object.Key("arn") - objectKey.String(*v.Arn) - } - - return nil -} - -func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error { - object := value.Object() - _ = object - - if v.ContextAssertion != nil { - objectKey := object.Key("ContextAssertion") - objectKey.String(*v.ContextAssertion) - } - - if v.ProviderArn != nil { - objectKey := object.Key("ProviderArn") - objectKey.String(*v.ProviderArn) - } - - return nil -} - -func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { - object := value.Object() - _ = object - - if v.Key != nil { - objectKey := object.Key("Key") - objectKey.String(*v.Key) - } - - if v.Value != nil { - objectKey := object.Key("Value") - objectKey.String(*v.Value) - } - - return nil -} - -func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.ExternalId != nil { - objectKey := object.Key("ExternalId") - objectKey.String(*v.ExternalId) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.ProvidedContexts != nil { - objectKey := object.Key("ProvidedContexts") - if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil { - return err - } - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.RoleSessionName != nil { - objectKey := object.Key("RoleSessionName") - objectKey.String(*v.RoleSessionName) - } - - if v.SerialNumber != nil { - objectKey := object.Key("SerialNumber") - objectKey.String(*v.SerialNumber) - } - - if v.SourceIdentity != nil { - objectKey := object.Key("SourceIdentity") - objectKey.String(*v.SourceIdentity) - } - - if v.Tags != nil { - objectKey := object.Key("Tags") - if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { - return err - } - } - - if v.TokenCode != nil { - objectKey := object.Key("TokenCode") - objectKey.String(*v.TokenCode) - } - - if v.TransitiveTagKeys != nil { - objectKey := object.Key("TransitiveTagKeys") - if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { - return err - } - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.PrincipalArn != nil { - objectKey := object.Key("PrincipalArn") - objectKey.String(*v.PrincipalArn) - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.SAMLAssertion != nil { - objectKey := object.Key("SAMLAssertion") - objectKey.String(*v.SAMLAssertion) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.ProviderId != nil { - objectKey := object.Key("ProviderId") - objectKey.String(*v.ProviderId) - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.RoleSessionName != nil { - objectKey := object.Key("RoleSessionName") - objectKey.String(*v.RoleSessionName) - } - - if v.WebIdentityToken != nil { - objectKey := object.Key("WebIdentityToken") - objectKey.String(*v.WebIdentityToken) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.TargetPrincipal != nil { - objectKey := object.Key("TargetPrincipal") - objectKey.String(*v.TargetPrincipal) - } - - if v.TaskPolicyArn != nil { - objectKey := object.Key("TaskPolicyArn") - if err := awsAwsquery_serializeDocumentPolicyDescriptorType(v.TaskPolicyArn, objectKey); err != nil { - return err - } - } - - return nil -} - -func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { - object := value.Object() - _ = object - - if v.EncodedMessage != nil { - objectKey := object.Key("EncodedMessage") - objectKey.String(*v.EncodedMessage) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { - object := value.Object() - _ = object - - if v.AccessKeyId != nil { - objectKey := object.Key("AccessKeyId") - objectKey.String(*v.AccessKeyId) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { - object := value.Object() - _ = object - - return nil -} - -func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Name != nil { - objectKey := object.Key("Name") - objectKey.String(*v.Name) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.Tags != nil { - objectKey := object.Key("Tags") - if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { - return err - } - } - - return nil -} - -func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.SerialNumber != nil { - objectKey := object.Key("SerialNumber") - objectKey.String(*v.SerialNumber) - } - - if v.TokenCode != nil { - objectKey := object.Key("TokenCode") - objectKey.String(*v.TokenCode) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go deleted file mode 100644 index 041629bba2cb..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ /dev/null @@ -1,248 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The web identity token that was passed is expired or is not valid. Get a new -// identity token from the identity provider and then retry the request. -type ExpiredTokenException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ExpiredTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ExpiredTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ExpiredTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ExpiredTokenException" - } - return *e.ErrorCodeOverride -} -func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request could not be fulfilled because the identity provider (IDP) that was -// asked to verify the incoming identity token could not be reached. This is often -// a transient error caused by network conditions. Retry the request a limited -// number of times so that you don't exceed the request rate. If the error -// persists, the identity provider might be down or not responding. -type IDPCommunicationErrorException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *IDPCommunicationErrorException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *IDPCommunicationErrorException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *IDPCommunicationErrorException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "IDPCommunicationError" - } - return *e.ErrorCodeOverride -} -func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The identity provider (IdP) reported that authentication failed. This might be -// because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it can -// also mean that the claim has expired or has been explicitly revoked. -type IDPRejectedClaimException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *IDPRejectedClaimException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *IDPRejectedClaimException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *IDPRejectedClaimException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "IDPRejectedClaim" - } - return *e.ErrorCodeOverride -} -func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as line -// breaks, or if the message has expired. -type InvalidAuthorizationMessageException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidAuthorizationMessageException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidAuthorizationMessageException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidAuthorizationMessageException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidAuthorizationMessageException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry the -// request. -type InvalidIdentityTokenException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidIdentityTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidIdentityTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidIdentityTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidIdentityToken" - } - return *e.ErrorCodeOverride -} -func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -type MalformedPolicyDocumentException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *MalformedPolicyDocumentException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *MalformedPolicyDocumentException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *MalformedPolicyDocumentException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "MalformedPolicyDocument" - } - return *e.ErrorCodeOverride -} -func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session tags -// into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide. -// -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length -type PackedPolicyTooLargeException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *PackedPolicyTooLargeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *PackedPolicyTooLargeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *PackedPolicyTooLargeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "PackedPolicyTooLarge" - } - return *e.ErrorCodeOverride -} -func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM -// User Guide. -// -// [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html -type RegionDisabledException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RegionDisabledException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RegionDisabledException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RegionDisabledException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RegionDisabledException" - } - return *e.ErrorCodeOverride -} -func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go deleted file mode 100644 index dff7a3c2e769..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ /dev/null @@ -1,144 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - - // The ARN of the temporary security credentials that are returned from the AssumeRole - // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in - // the IAM User Guide. - // - // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html - // - // This member is required. - Arn *string - - // A unique identifier that contains the role ID and the role session name of the - // role that is being assumed. The role ID is generated by Amazon Web Services when - // the role is created. - // - // This member is required. - AssumedRoleId *string - - noSmithyDocumentSerde -} - -// Amazon Web Services credentials for API authentication. -type Credentials struct { - - // The access key ID that identifies the temporary security credentials. - // - // This member is required. - AccessKeyId *string - - // The date on which the current credentials expire. - // - // This member is required. - Expiration *time.Time - - // The secret access key that can be used to sign requests. - // - // This member is required. - SecretAccessKey *string - - // The token that users must pass to the service API to use the temporary - // credentials. - // - // This member is required. - SessionToken *string - - noSmithyDocumentSerde -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - - // The ARN that specifies the federated user that is associated with the - // credentials. For more information about ARNs and how to use them in policies, - // see [IAM Identifiers]in the IAM User Guide. - // - // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html - // - // This member is required. - Arn *string - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // This member is required. - FederatedUserId *string - - noSmithyDocumentSerde -} - -// A reference to the IAM managed policy that is passed as a session policy for a -// role session or a federated user session. -type PolicyDescriptorType struct { - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web - // Services General Reference. - // - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - Arn *string - - noSmithyDocumentSerde -} - -// Contains information about the provided context. This includes the signed and -// encrypted trusted context assertion and the context provider ARN from which the -// trusted context assertion was generated. -type ProvidedContext struct { - - // The signed and encrypted trusted context assertion generated by the context - // provider. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. - ContextAssertion *string - - // The context provider ARN from which the trusted context assertion was generated. - ProviderArn *string - - noSmithyDocumentSerde -} - -// You can pass custom key-value pair attributes when you assume a role or -// federate a user. These are called session tags. You can then use the session -// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User -// Guide. -// -// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -type Tag struct { - - // The key for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag keys can’t - // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User - // Guide. - // - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - // - // This member is required. - Key *string - - // The value for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag values can’t - // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User - // Guide. - // - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - // - // This member is required. - Value *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go deleted file mode 100644 index 1026e22118d0..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go +++ /dev/null @@ -1,347 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpAssumeRole struct { -} - -func (*validateOpAssumeRole) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoleWithSAML struct { -} - -func (*validateOpAssumeRoleWithSAML) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoleWithWebIdentity struct { -} - -func (*validateOpAssumeRoleWithWebIdentity) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoot struct { -} - -func (*validateOpAssumeRoot) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRootInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRootInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDecodeAuthorizationMessage struct { -} - -func (*validateOpDecodeAuthorizationMessage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetAccessKeyInfo struct { -} - -func (*validateOpGetAccessKeyInfo) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetAccessKeyInfoInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetAccessKeyInfoInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetFederationToken struct { -} - -func (*validateOpGetFederationToken) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetFederationTokenInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetFederationTokenInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) -} - -func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) -} - -func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) -} - -func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After) -} - -func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) -} - -func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) -} - -func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) -} - -func validateTag(v *types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tag"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Value == nil { - invalidParams.Add(smithy.NewErrParamRequired("Value")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagListType(v []types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TagListType"} - for i := range v { - if err := validateTag(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleInput(v *AssumeRoleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.RoleSessionName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) - } - if v.Tags != nil { - if err := validateTagListType(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.PrincipalArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) - } - if v.SAMLAssertion == nil { - invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.RoleSessionName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) - } - if v.WebIdentityToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRootInput(v *AssumeRootInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRootInput"} - if v.TargetPrincipal == nil { - invalidParams.Add(smithy.NewErrParamRequired("TargetPrincipal")) - } - if v.TaskPolicyArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("TaskPolicyArn")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} - if v.EncodedMessage == nil { - invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} - if v.AccessKeyId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} - if v.Name == nil { - invalidParams.Add(smithy.NewErrParamRequired("Name")) - } - if v.Tags != nil { - if err := validateTagListType(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore deleted file mode 100644 index 2518b3491549..000000000000 --- a/vendor/github.com/aws/smithy-go/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Eclipse -.classpath -.project -.settings/ - -# Intellij -.idea/ -*.iml -*.iws - -# Mac -.DS_Store - -# Maven -target/ -**/dependency-reduced-pom.xml - -# Gradle -/.gradle -build/ -*/out/ -*/*/out/ - -# VS Code -bin/ -.vscode/ - -# make -c.out diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml deleted file mode 100644 index f8d1035cc332..000000000000 --- a/vendor/github.com/aws/smithy-go/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go -sudo: true -dist: bionic - -branches: - only: - - main - -os: - - linux - - osx - # Travis doesn't work with windows and Go tip - #- windows - -go: - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi - - (cd /tmp/; go get golang.org/x/lint/golint) - -script: - - make go test -v ./...; - diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md deleted file mode 100644 index 1d60def6d1b3..000000000000 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ /dev/null @@ -1,322 +0,0 @@ -# Release (2025-07-24) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.5 - * **Bug Fix**: Fix HTTP metrics data race. - * **Bug Fix**: Replace usages of deprecated ioutil package. - -# Release (2025-06-16) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.4 - * **Bug Fix**: Fix CBOR serd empty check for string and enum fields - * **Bug Fix**: Fix HTTP metrics data race. - * **Bug Fix**: Replace usages of deprecated ioutil package. - -# Release (2025-02-17) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.3 - * **Dependency Update**: Bump minimum Go version to 1.22 per our language support policy. - -# Release (2025-01-21) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.2 - * **Bug Fix**: Fix HTTP metrics data race. - * **Bug Fix**: Replace usages of deprecated ioutil package. - -# Release (2024-11-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.1 - * **Bug Fix**: Fix failure to replace URI path segments when their names overlap. - -# Release (2024-10-03) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.0 - * **Feature**: Add HTTP client metrics. - -# Release (2024-09-25) - -## Module Highlights -* `github.com/aws/smithy-go/aws-http-auth`: [v1.0.0](aws-http-auth/CHANGELOG.md#v100-2024-09-25) - * **Release**: Initial release of module aws-http-auth, which implements generically consumable SigV4 and SigV4a request signing. - -# Release (2024-09-19) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.21.0 - * **Feature**: Add tracing and metrics APIs, and builtin instrumentation for both, in generated clients. -* `github.com/aws/smithy-go/metrics/smithyotelmetrics`: [v1.0.0](metrics/smithyotelmetrics/CHANGELOG.md#v100-2024-09-19) - * **Release**: Initial release of `smithyotelmetrics` module, which is used to adapt an OpenTelemetry SDK meter provider to be used with Smithy clients. -* `github.com/aws/smithy-go/tracing/smithyoteltracing`: [v1.0.0](tracing/smithyoteltracing/CHANGELOG.md#v100-2024-09-19) - * **Release**: Initial release of `smithyoteltracing` module, which is used to adapt an OpenTelemetry SDK tracer provider to be used with Smithy clients. - -# Release (2024-08-14) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.4 - * **Dependency Update**: Bump minimum Go version to 1.21. - -# Release (2024-06-27) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.3 - * **Bug Fix**: Fix encoding/cbor test overflow on x86. - -# Release (2024-03-29) - -* No change notes available for this release. - -# Release (2024-02-21) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.1 - * **Bug Fix**: Remove runtime dependency on go-cmp. - -# Release (2024-02-13) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.0 - * **Feature**: Add codegen definition for sigv4a trait. - * **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# Release (2023-12-07) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.19.0 - * **Feature**: Support modeled request compression. - -# Release (2023-11-30) - -* No change notes available for this release. - -# Release (2023-11-29) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.18.0 - * **Feature**: Expose Options() method on generated service clients. - -# Release (2023-11-15) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.17.0 - * **Feature**: Support identity/auth components of client reference architecture. - -# Release (2023-10-31) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.16.0 - * **Feature**: **LANG**: Bump minimum go version to 1.19. - -# Release (2023-10-06) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.15.0 - * **Feature**: Add `http.WithHeaderComment` middleware. - -# Release (2023-08-18) - -* No change notes available for this release. - -# Release (2023-08-07) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.14.1 - * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation. - -# Release (2023-07-31) - -## General Highlights -* **Feature**: Adds support for smithy-modeled endpoint resolution. - -# Release (2022-12-02) - -* No change notes available for this release. - -# Release (2022-10-24) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.4 - * **Bug Fix**: fixed document type checking for encoding nested types - -# Release (2022-09-14) - -* No change notes available for this release. - -# Release (v1.13.2) - -* No change notes available for this release. - -# Release (v1.13.1) - -* No change notes available for this release. - -# Release (v1.13.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.0 - * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation. - -# Release (v1.12.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.1 - * **Bug Fix**: Fixes a bug where JSON object keys were not escaped. - -# Release (v1.12.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.0 - * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value. - -# Release (v1.11.3) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.3 - * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8. - -# Release (v1.11.2) - -* No change notes available for this release. - -# Release (v1.11.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.1 - * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) - -# Release (v1.11.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.0 - * **Feature**: Updates deserialization of header list to supported quoted strings - -# Release (v1.10.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.10.0 - * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. - -# Release (v1.9.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.1 - * **Documentation**: Fixes various typos in Go package documentation. - -# Release (v1.9.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.0 - * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. - * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. - -# Release v1.8.1 - -### Smithy Go Module -* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. - * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) - -# Release v1.8.0 - -### Smithy Go Module - -* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) - * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset. - * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) - -# Release v1.7.0 - -### Smithy Go Module -* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) - * Handle error for defer close call -* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) - * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. -* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) - -### Codegen -* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) - * Adds support for Smithy Document shapes and supporting types for protocols to implement support - -# Release v1.6.0 (2021-07-15) - -### Smithy Go Module -* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) - -### Codegen -* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) -* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315)) - -# Release v1.5.0 (2021-06-25) - -### Smithy Go module -* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) - * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost. - -### Codegen -* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) -* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) -* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) -* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) -* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) -* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) - -# Release v1.4.0 (2021-05-06) - -### Smithy Go module -* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) - -### Codegen -* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) -* Add support for httpQueryParams location -* Add support for model renaming conflict resolution with service closure - -# Release v1.3.1 (2021-04-08) - -### Smithy Go module -* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) -* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) - -# Release v1.3.0 (2021-04-01) - -### Smithy Go module -* `transport/http`: Add utility to safely join string to url path, and url raw query. - -### Codegen -* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. - -# Release v1.2.0 (2021-03-12) - -### Smithy Go module -* Fix support for parsing shortened year format in HTTP Date header. -* Fix GitHub APIDiff action workflow to get gorelease tool correctly. -* Fix codegen artifact unit test for Go 1.16 - -### Codegen -* Fix generating paginator nil parameter handling before usage. -* Fix Serialize unboxed members decorated as required. -* Add ability to define resolvers at both client construction and operation invocation. -* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md deleted file mode 100644 index 5b627cfa60b5..000000000000 --- a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md deleted file mode 100644 index 1f8d01ff6abd..000000000000 --- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md +++ /dev/null @@ -1,90 +0,0 @@ -# Contributing Guidelines - -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. - -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. - - -## Reporting Bugs/Feature Requests - -We welcome you to use the GitHub issue tracker to report bugs or suggest features. - -When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: - -* A reproducible test case or series of steps -* The version of our code being used -* Any modifications you've made relevant to the bug -* Anything unusual about your environment or deployment - - -## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: - -1. You are working against the latest source on the *main* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. - -To send us a pull request, please: - -1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. -3. Ensure local tests pass. -4. Commit to your fork using clear commit messages. -5. Send us a pull request, answering any default questions in the pull request interface. -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. - -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). - -### Changelog Documents - -(You can SKIP this step if you are only changing the code generator, and not the runtime). - -When submitting a pull request please include a changelog file on a folder named `.changelog`. -These are used to generate the content `CHANGELOG.md` and Release Notes. The format of the file is as follows: - -``` -{ - "id": "12345678-1234-1234-1234-123456789012" - "type": "bugfix" - "collapse": true - "description": "Fix improper use of printf-style functions.", - "modules": [ - "." - ] -} -``` - -* id: a UUID. This should also be used for the name of the file, so if your id is `12345678-1234-1234-1234-123456789012` the file should be named `12345678-1234-1234-1234-123456789012.json/` -* type: one of the following: - * bugfix: Fixing an existing bug - * Feature: Adding a new feature to an existing service - * Release: Releasing a new module - * Dependency: Updating dependencies - * Announcement: Making an announcement, like deprecation of a module -* collapse: whether this change should appear separately on the release notes on every module listed on `modules` (`"collapse": false`), or if it should show up as a single entry (`"collapse": true`) - * For the smithy-go repository this should always be `false` -* description: Description of this change. Most of the times is the same as the title of the PR -* modules: which Go modules does this change impact. The root module is expressed as "." - - -## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. - - -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. - - -## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. - - -## Licensing - -See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE deleted file mode 100644 index 67db8588217f..000000000000 --- a/vendor/github.com/aws/smithy-go/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile deleted file mode 100644 index 34b17ab2fe09..000000000000 --- a/vendor/github.com/aws/smithy-go/Makefile +++ /dev/null @@ -1,125 +0,0 @@ -PRE_RELEASE_VERSION ?= - -RELEASE_MANIFEST_FILE ?= -RELEASE_CHGLOG_DESC_FILE ?= - -REPOTOOLS_VERSION ?= latest -REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools -REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= -REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} - -UNIT_TEST_TAGS= -BUILD_TAGS= - -ifneq ($(PRE_RELEASE_VERSION),) - REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} -endif - -smithy-publish-local: - cd codegen && ./gradlew publishToMavenLocal - -smithy-build: - cd codegen && ./gradlew build - -smithy-clean: - cd codegen && ./gradlew clean - -GRADLE_RETRIES := 3 -GRADLE_SLEEP := 2 - -# We're making a call to ./gradlew to trigger downloading Gradle and -# starting the daemon. Any call works, so using `./gradlew help` -ensure-gradle-up: - @cd codegen && for i in $(shell seq 1 $(GRADLE_RETRIES)); do \ - echo "Checking if Gradle daemon is up, attempt $$i..."; \ - if ./gradlew help; then \ - echo "Gradle daemon is up!"; \ - exit 0; \ - fi; \ - echo "Failed to start Gradle, retrying in $(GRADLE_SLEEP) seconds..."; \ - sleep $(GRADLE_SLEEP); \ - done; \ - echo "Failed to start Gradle after $(GRADLE_RETRIES) attempts."; \ - exit 1 - -################## -# Linting/Verify # -################## -.PHONY: verify vet cover - -verify: vet - -vet: - go vet ${BUILD_TAGS} --all ./... - -cover: - go test ${BUILD_TAGS} -coverprofile c.out ./... - @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \ - echo "total (statements): $$cover%"; - -################ -# Unit Testing # -################ -.PHONY: unit unit-race unit-test unit-race-test - -unit: verify - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race: verify - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... - -unit-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... - -##################### -# Release Process # -##################### -.PHONY: preview-release pre-release-validation release - -preview-release: - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - -pre-release-validation: - @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ - echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ - fi - @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ - echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ - fi - -release: pre-release-validation - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} - go run ${REPOTOOLS_CMD_CHANGELOG} rm -all - go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} - -module-version: - @go run ${REPOTOOLS_CMD_MODULE_VERSION} . - -############## -# Repo Tools # -############## -.PHONY: install-changelog - -external-changelog: - mkdir -p .changelog - cp changelog-template.json .changelog/00000000-0000-0000-0000-000000000000.json - @echo "Generate a new UUID and update the file at .changelog/00000000-0000-0000-0000-000000000000.json" - @echo "Make sure to rename the file with your new id, like .changelog/12345678-1234-1234-1234-123456789012.json" - @echo "See CONTRIBUTING.md 'Changelog Documents' and an example at https://github.com/aws/smithy-go/pull/543/files" - -install-changelog: - go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE deleted file mode 100644 index 616fc5889451..000000000000 --- a/vendor/github.com/aws/smithy-go/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md deleted file mode 100644 index c9ba5ea5e4b0..000000000000 --- a/vendor/github.com/aws/smithy-go/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Smithy Go - -[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) - -[Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. - -The smithy-go runtime requires a minimum version of Go 1.22. - -**WARNING: All interfaces are subject to change.** - -## Can I use the code generators? - -In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), -such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), -in order to generate transport mechanisms and serialization/deserialization -code ("serde") accordingly. - -The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, -therefore the useability of this project on its own is currently limited. -Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) -exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are -tracking the movement of those out of the SDK into smithy-go in -[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no -timeline for doing so. - -## Plugins - -This repository implements the following Smithy build plugins: - -| ID | GAV prefix | Description | -|----|------------|-------------| -| `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | -| `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | - -**NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.** - -## `go-codegen` - -### Configuration - -[`GoSettings`](codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/GoSettings.java) -contains all of the settings enabled from `smithy-build.json` and helper -methods and types. The up-to-date list of top-level properties enabled for -`go-client-codegen` can be found in `GoSettings::from()`. - -| Setting | Type | Required | Description | -|-----------------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------| -| `service` | string | yes | The Shape ID of the service for which to generate the client. | -| `module` | string | yes | Name of the module in `generated.json` (and `go.mod` if `generateGoMod` is enabled) and `doc.go`. | -| `generateGoMod` | boolean | | Whether to generate a default `go.mod` file. The default value is `false`. | -| `goDirective` | string | | [Go directive](https://go.dev/ref/mod#go-mod-file-go) of the module. The default value is the minimum supported Go version. | - -### Supported protocols - -| Protocol | Notes | -|----------|-------| -| [`smithy.protocols#rpcv2Cbor`](https://smithy.io/2.0/additional-specs/protocols/smithy-rpc-v2.html) | Event streaming not yet implemented. | - -### Example - -This example applies the `go-codegen` build plugin to the Smithy quickstart -example created from `smithy init`: - -```json -{ - "version": "1.0", - "sources": [ - "models" - ], - "maven": { - "dependencies": [ - "software.amazon.smithy.go:smithy-go-codegen:0.1.0" - ] - }, - "plugins": { - "go-codegen": { - "service": "example.weather#Weather", - "module": "github.com/example/weather", - "generateGoMod": true, - "goDirective": "1.22" - } - } -} -``` - -## `go-server-codegen` - -This plugin is a work-in-progress and is currently undocumented. - -## License - -This project is licensed under the Apache-2.0 License. - diff --git a/vendor/github.com/aws/smithy-go/auth/auth.go b/vendor/github.com/aws/smithy-go/auth/auth.go deleted file mode 100644 index 5bdb70c9a781..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/auth.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package auth defines protocol-agnostic authentication types for smithy -// clients. -package auth diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go deleted file mode 100644 index 1c9b9715cb00..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package bearer provides middleware and utilities for authenticating API -// operation calls with a Bearer Token. -package bearer diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go deleted file mode 100644 index 8c7d72099597..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go +++ /dev/null @@ -1,104 +0,0 @@ -package bearer - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Message is the middleware stack's request transport message value. -type Message interface{} - -// Signer provides an interface for implementations to decorate a request -// message with a bearer token. The signer is responsible for validating the -// message type is compatible with the signer. -type Signer interface { - SignWithBearerToken(context.Context, Token, Message) (Message, error) -} - -// AuthenticationMiddleware provides the Finalize middleware step for signing -// an request message with a bearer token. -type AuthenticationMiddleware struct { - signer Signer - tokenProvider TokenProvider -} - -// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the -// middleware Stack in the Finalize step with the options provided. -func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error { - return s.Finalize.Add( - NewAuthenticationMiddleware(signer, tokenProvider), - middleware.After, - ) -} - -// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware. -func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware { - return &AuthenticationMiddleware{ - signer: signer, - tokenProvider: tokenProvider, - } -} - -const authenticationMiddlewareID = "BearerTokenAuthentication" - -// ID returns the resolver identifier -func (m *AuthenticationMiddleware) ID() string { - return authenticationMiddlewareID -} - -// HandleFinalize implements the FinalizeMiddleware interface in order to -// update the request with bearer token authentication. -func (m *AuthenticationMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - token, err := m.tokenProvider.RetrieveBearerToken(ctx) - if err != nil { - return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err) - } - - signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request) - if err != nil { - return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err) - } - - in.Request = signedMessage - return next.HandleFinalize(ctx, in) -} - -// SignHTTPSMessage provides a bearer token authentication implementation that -// will sign the message with the provided bearer token. -// -// Will fail if the message is not a smithy-go HTTP request or the request is -// not HTTPS. -type SignHTTPSMessage struct{} - -// NewSignHTTPSMessage returns an initialized signer for HTTP messages. -func NewSignHTTPSMessage() *SignHTTPSMessage { - return &SignHTTPSMessage{} -} - -// SignWithBearerToken returns a copy of the HTTP request with the bearer token -// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750. -// -// Returns an error if the request's URL scheme is not HTTPS, or the request -// message is not an smithy-go HTTP Request pointer type. -func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) { - req, ok := message.(*smithyhttp.Request) - if !ok { - return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message) - } - - if !req.IsHTTPS() { - return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS") - } - - reqClone := req.Clone() - reqClone.Header.Set("Authorization", "Bearer "+token.Value) - - return reqClone, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go deleted file mode 100644 index be260d4c764a..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/token.go +++ /dev/null @@ -1,50 +0,0 @@ -package bearer - -import ( - "context" - "time" -) - -// Token provides a type wrapping a bearer token and expiration metadata. -type Token struct { - Value string - - CanExpire bool - Expires time.Time -} - -// Expired returns if the token's Expires time is before or equal to the time -// provided. If CanExpires is false, Expired will always return false. -func (t Token) Expired(now time.Time) bool { - if !t.CanExpire { - return false - } - now = now.Round(0) - return now.Equal(t.Expires) || now.After(t.Expires) -} - -// TokenProvider provides interface for retrieving bearer tokens. -type TokenProvider interface { - RetrieveBearerToken(context.Context) (Token, error) -} - -// TokenProviderFunc provides a helper utility to wrap a function as a type -// that implements the TokenProvider interface. -type TokenProviderFunc func(context.Context) (Token, error) - -// RetrieveBearerToken calls the wrapped function, returning the Token or -// error. -func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) { - return fn(ctx) -} - -// StaticTokenProvider provides a utility for wrapping a static bearer token -// value within an implementation of a token provider. -type StaticTokenProvider struct { - Token Token -} - -// RetrieveBearerToken returns the static token specified. -func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) { - return s.Token, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go deleted file mode 100644 index 223ddf52bba8..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go +++ /dev/null @@ -1,208 +0,0 @@ -package bearer - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - smithycontext "github.com/aws/smithy-go/context" - "github.com/aws/smithy-go/internal/sync/singleflight" -) - -// package variable that can be override in unit tests. -var timeNow = time.Now - -// TokenCacheOptions provides a set of optional configuration options for the -// TokenCache TokenProvider. -type TokenCacheOptions struct { - // The duration before the token will expire when the credentials will be - // refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls - // will be blocking. - // - // Asynchronous refreshes are deduplicated, and only one will be in-flight - // at a time. If the token expires while an asynchronous refresh is in - // flight, the next call to RetrieveBearerToken will block on that refresh - // to return. - RefreshBeforeExpires time.Duration - - // The timeout the underlying TokenProvider's RetrieveBearerToken call must - // return within, or will be canceled. Defaults to 0, no timeout. - // - // If 0 timeout, its possible for the underlying tokenProvider's - // RetrieveBearerToken call to block forever. Preventing subsequent - // TokenCache attempts to refresh the token. - // - // If this timeout is reached all pending deduplicated calls to - // TokenCache RetrieveBearerToken will fail with an error. - RetrieveBearerTokenTimeout time.Duration - - // The minimum duration between asynchronous refresh attempts. If the next - // asynchronous recent refresh attempt was within the minimum delay - // duration, the call to retrieve will return the current cached token, if - // not expired. - // - // The asynchronous retrieve is deduplicated across multiple calls when - // RetrieveBearerToken is called. The asynchronous retrieve is not a - // periodic task. It is only performed when the token has not yet expired, - // and the current item is within the RefreshBeforeExpires window, and the - // TokenCache's RetrieveBearerToken method is called. - // - // If 0, (default) there will be no minimum delay between asynchronous - // refresh attempts. - // - // If DisableAsyncRefresh is true, this option is ignored. - AsyncRefreshMinimumDelay time.Duration - - // Sets if the TokenCache will attempt to refresh the token in the - // background asynchronously instead of blocking for credentials to be - // refreshed. If disabled token refresh will be blocking. - // - // The first call to RetrieveBearerToken will always be blocking, because - // there is no cached token. - DisableAsyncRefresh bool -} - -// TokenCache provides an utility to cache Bearer Authentication tokens from a -// wrapped TokenProvider. The TokenCache can be has options to configure the -// cache's early and asynchronous refresh of the token. -type TokenCache struct { - options TokenCacheOptions - provider TokenProvider - - cachedToken atomic.Value - lastRefreshAttemptTime atomic.Value - sfGroup singleflight.Group -} - -// NewTokenCache returns a initialized TokenCache that implements the -// TokenProvider interface. Wrapping the provider passed in. Also taking a set -// of optional functional option parameters to configure the token cache. -func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache { - var options TokenCacheOptions - for _, fn := range optFns { - fn(&options) - } - - return &TokenCache{ - options: options, - provider: provider, - } -} - -// RetrieveBearerToken returns the token if it could be obtained, or error if a -// valid token could not be retrieved. -// -// The passed in Context's cancel/deadline/timeout will impacting only this -// individual retrieve call and not any other already queued up calls. This -// means underlying provider's RetrieveBearerToken calls could block for ever, -// and not be canceled with the Context. Set RetrieveBearerTokenTimeout to -// provide a timeout, preventing the underlying TokenProvider blocking forever. -// -// By default, if the passed in Context is canceled, all of its values will be -// considered expired. The wrapped TokenProvider will not be able to lookup the -// values from the Context once it is expired. This is done to protect against -// expired values no longer being valid. To disable this behavior, use -// smithy-go's context.WithPreserveExpiredValues to add a value to the Context -// before calling RetrieveBearerToken to enable support for expired values. -// -// Without RetrieveBearerTokenTimeout there is the potential for a underlying -// Provider's RetrieveBearerToken call to sit forever. Blocking in subsequent -// attempts at refreshing the token. -func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) { - cachedToken, ok := p.getCachedToken() - if !ok || cachedToken.Expired(timeNow()) { - return p.refreshBearerToken(ctx) - } - - // Check if the token should be refreshed before it expires. - refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires)) - if !refreshToken { - return cachedToken, nil - } - - if p.options.DisableAsyncRefresh { - return p.refreshBearerToken(ctx) - } - - p.tryAsyncRefresh(ctx) - - return cachedToken, nil -} - -// tryAsyncRefresh attempts to asynchronously refresh the token returning the -// already cached token. If it AsyncRefreshMinimumDelay option is not zero, and -// the duration since the last refresh is less than that value, nothing will be -// done. -func (p *TokenCache) tryAsyncRefresh(ctx context.Context) { - if p.options.AsyncRefreshMinimumDelay != 0 { - var lastRefreshAttempt time.Time - if v := p.lastRefreshAttemptTime.Load(); v != nil { - lastRefreshAttempt = v.(time.Time) - } - - if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) { - return - } - } - - // Ignore the returned channel so this won't be blocking, and limit the - // number of additional goroutines created. - p.sfGroup.DoChan("async-refresh", func() (interface{}, error) { - res, err := p.refreshBearerToken(ctx) - if p.options.AsyncRefreshMinimumDelay != 0 { - var refreshAttempt time.Time - if err != nil { - refreshAttempt = timeNow() - } - p.lastRefreshAttemptTime.Store(refreshAttempt) - } - - return res, err - }) -} - -func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) { - resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) { - ctx := smithycontext.WithSuppressCancel(ctx) - if v := p.options.RetrieveBearerTokenTimeout; v != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, v) - defer cancel() - } - return p.singleRetrieve(ctx) - }) - - select { - case res := <-resCh: - return res.Val.(Token), res.Err - case <-ctx.Done(): - return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err()) - } -} - -func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) { - token, err := p.provider.RetrieveBearerToken(ctx) - if err != nil { - return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err) - } - - p.cachedToken.Store(&token) - return token, nil -} - -// getCachedToken returns the currently cached token and true if found. Returns -// false if no token is cached. -func (p *TokenCache) getCachedToken() (Token, bool) { - v := p.cachedToken.Load() - if v == nil { - return Token{}, false - } - - t := v.(*Token) - if t == nil || t.Value == "" { - return Token{}, false - } - - return *t, true -} diff --git a/vendor/github.com/aws/smithy-go/auth/identity.go b/vendor/github.com/aws/smithy-go/auth/identity.go deleted file mode 100644 index ba8cf70d4d44..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/identity.go +++ /dev/null @@ -1,47 +0,0 @@ -package auth - -import ( - "context" - "time" - - "github.com/aws/smithy-go" -) - -// Identity contains information that identifies who the user making the -// request is. -type Identity interface { - Expiration() time.Time -} - -// IdentityResolver defines the interface through which an Identity is -// retrieved. -type IdentityResolver interface { - GetIdentity(context.Context, smithy.Properties) (Identity, error) -} - -// IdentityResolverOptions defines the interface through which an entity can be -// queried to retrieve an IdentityResolver for a given auth scheme. -type IdentityResolverOptions interface { - GetIdentityResolver(schemeID string) IdentityResolver -} - -// AnonymousIdentity is a sentinel to indicate no identity. -type AnonymousIdentity struct{} - -var _ Identity = (*AnonymousIdentity)(nil) - -// Expiration returns the zero value for time, as anonymous identity never -// expires. -func (*AnonymousIdentity) Expiration() time.Time { - return time.Time{} -} - -// AnonymousIdentityResolver returns AnonymousIdentity. -type AnonymousIdentityResolver struct{} - -var _ IdentityResolver = (*AnonymousIdentityResolver)(nil) - -// GetIdentity returns AnonymousIdentity. -func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) { - return &AnonymousIdentity{}, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/option.go b/vendor/github.com/aws/smithy-go/auth/option.go deleted file mode 100644 index d5dabff04bf0..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/option.go +++ /dev/null @@ -1,25 +0,0 @@ -package auth - -import "github.com/aws/smithy-go" - -type ( - authOptionsKey struct{} -) - -// Option represents a possible authentication method for an operation. -type Option struct { - SchemeID string - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -// GetAuthOptions gets auth Options from Properties. -func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) { - v, ok := p.Get(authOptionsKey{}).([]*Option) - return v, ok -} - -// SetAuthOptions sets auth Options on Properties. -func SetAuthOptions(p *smithy.Properties, options []*Option) { - p.Set(authOptionsKey{}, options) -} diff --git a/vendor/github.com/aws/smithy-go/auth/scheme_id.go b/vendor/github.com/aws/smithy-go/auth/scheme_id.go deleted file mode 100644 index fb6a57c640fc..000000000000 --- a/vendor/github.com/aws/smithy-go/auth/scheme_id.go +++ /dev/null @@ -1,20 +0,0 @@ -package auth - -// Anonymous -const ( - SchemeIDAnonymous = "smithy.api#noAuth" -) - -// HTTP auth schemes -const ( - SchemeIDHTTPBasic = "smithy.api#httpBasicAuth" - SchemeIDHTTPDigest = "smithy.api#httpDigestAuth" - SchemeIDHTTPBearer = "smithy.api#httpBearerAuth" - SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth" -) - -// AWS auth schemes -const ( - SchemeIDSigV4 = "aws.auth#sigv4" - SchemeIDSigV4A = "aws.auth#sigv4a" -) diff --git a/vendor/github.com/aws/smithy-go/changelog-template.json b/vendor/github.com/aws/smithy-go/changelog-template.json deleted file mode 100644 index d36e2b3e1aac..000000000000 --- a/vendor/github.com/aws/smithy-go/changelog-template.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id": "00000000-0000-0000-0000-000000000000", - "type": "feature|bugfix|dependency", - "description": "Description of your changes", - "collapse": false, - "modules": [ - "." - ] -} diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/cache.go b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go deleted file mode 100644 index 69af87751969..000000000000 --- a/vendor/github.com/aws/smithy-go/container/private/cache/cache.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package cache defines the interface for a key-based data store. -// -// This package is designated as private and is intended for use only by the -// smithy client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package cache - -// Cache defines the interface for an opaquely-typed, key-based data store. -// -// The thread-safety of this interface is undefined and is dictated by -// implementations. -type Cache interface { - // Retrieve the value associated with the given key. The returned boolean - // indicates whether the cache held a value for the given key. - Get(k interface{}) (interface{}, bool) - - // Store a value under the given key. - Put(k interface{}, v interface{}) -} diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go deleted file mode 100644 index 02ecb0a32772..000000000000 --- a/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package lru implements [cache.Cache] with an LRU eviction policy. -// -// This implementation is NOT thread-safe. -// -// This package is designated as private and is intended for use only by the -// smithy client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package lru - -import ( - "container/list" - - "github.com/aws/smithy-go/container/private/cache" -) - -// New creates a new LRU cache with the given capacity. -func New(cap int) cache.Cache { - return &lru{ - entries: make(map[interface{}]*list.Element, cap), - cap: cap, - mru: list.New(), - } -} - -type lru struct { - entries map[interface{}]*list.Element - cap int - - mru *list.List // least-recently used is at the back -} - -type element struct { - key interface{} - value interface{} -} - -func (l *lru) Get(k interface{}) (interface{}, bool) { - e, ok := l.entries[k] - if !ok { - return nil, false - } - - l.mru.MoveToFront(e) - return e.Value.(*element).value, true -} - -func (l *lru) Put(k interface{}, v interface{}) { - if len(l.entries) == l.cap { - l.evict() - } - - ev := &element{ - key: k, - value: v, - } - e := l.mru.PushFront(ev) - l.entries[k] = e -} - -func (l *lru) evict() { - e := l.mru.Remove(l.mru.Back()) - delete(l.entries, e.(*element).key) -} diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go deleted file mode 100644 index a39b84a27843..000000000000 --- a/vendor/github.com/aws/smithy-go/context/suppress_expired.go +++ /dev/null @@ -1,81 +0,0 @@ -package context - -import "context" - -// valueOnlyContext provides a utility to preserve only the values of a -// Context. Suppressing any cancellation or deadline on that context being -// propagated downstream of this value. -// -// If preserveExpiredValues is false (default), and the valueCtx is canceled, -// calls to lookup values with the Values method, will always return nil. Setting -// preserveExpiredValues to true, will allow the valueOnlyContext to lookup -// values in valueCtx even if valueCtx is canceled. -// -// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility. -// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go -type valueOnlyContext struct { - context.Context - - preserveExpiredValues bool - valuesCtx context.Context -} - -var _ context.Context = (*valueOnlyContext)(nil) - -// Value looks up the key, returning its value. If configured to not preserve -// values of expired context, and the wrapping context is canceled, nil will be -// returned. -func (v *valueOnlyContext) Value(key interface{}) interface{} { - if !v.preserveExpiredValues { - select { - case <-v.valuesCtx.Done(): - return nil - default: - } - } - - return v.valuesCtx.Value(key) -} - -// WithSuppressCancel wraps the Context value, suppressing its deadline and -// cancellation events being propagated downstream to consumer of the returned -// context. -// -// By default the wrapped Context's Values are available downstream until the -// wrapped Context is canceled. Once the wrapped Context is canceled, Values -// method called on the context return will no longer lookup any key. As they -// are now considered expired. -// -// To override this behavior, use WithPreserveExpiredValues on the Context -// before it is wrapped by WithSuppressCancel. This will make the Context -// returned by WithSuppressCancel allow lookup of expired values. -func WithSuppressCancel(ctx context.Context) context.Context { - return &valueOnlyContext{ - Context: context.Background(), - valuesCtx: ctx, - - preserveExpiredValues: GetPreserveExpiredValues(ctx), - } -} - -type preserveExpiredValuesKey struct{} - -// WithPreserveExpiredValues adds a Value to the Context if expired values -// should be preserved, and looked up by a Context wrapped by -// WithSuppressCancel. -// -// WithPreserveExpiredValues must be added as a value to a Context, before that -// Context is wrapped by WithSuppressCancel -func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context { - return context.WithValue(ctx, preserveExpiredValuesKey{}, enable) -} - -// GetPreserveExpiredValues looks up, and returns the PreserveExpressValues -// value in the context. Returning true if enabled, false otherwise. -func GetPreserveExpiredValues(ctx context.Context) bool { - v := ctx.Value(preserveExpiredValuesKey{}) - if v != nil { - return v.(bool) - } - return false -} diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go deleted file mode 100644 index 87b0c74b75c6..000000000000 --- a/vendor/github.com/aws/smithy-go/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package smithy provides the core components for a Smithy SDK. -package smithy diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go deleted file mode 100644 index dec498c57bf9..000000000000 --- a/vendor/github.com/aws/smithy-go/document.go +++ /dev/null @@ -1,10 +0,0 @@ -package smithy - -// Document provides access to loosely structured data in a document-like -// format. -// -// Deprecated: See the github.com/aws/smithy-go/document package. -type Document interface { - UnmarshalDocument(interface{}) error - GetValue() (interface{}, error) -} diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go deleted file mode 100644 index 03055b7a1c2e..000000000000 --- a/vendor/github.com/aws/smithy-go/document/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package document provides interface definitions and error types for document types. -// -// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send -// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8 -// strings to these values. -// -// API Clients expose document constructors in their respective client document packages which must be used to -// Marshal and Unmarshal Go types to and from their respective protocol representations. -// -// See the Marshaler and Unmarshaler type documentation for more details on how to Go types can be converted to and from -// document types. -package document diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go deleted file mode 100644 index 8f852d95c699..000000000000 --- a/vendor/github.com/aws/smithy-go/document/document.go +++ /dev/null @@ -1,153 +0,0 @@ -package document - -import ( - "fmt" - "math/big" - "strconv" -) - -// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and -// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling. -// -// Marshal supports basic scalars (int,uint,float,bool,string), big.Int, and big.Float, maps, slices, and structs. -// Anonymous nested types are flattened based on Go anonymous type visibility. -// -// When defining struct types. the `document` struct tag can be used to control how the value will be -// marshaled into the resulting protocol document. -// -// // Field is ignored -// Field int `document:"-"` -// -// // Field object of key "myName" -// Field int `document:"myName"` -// -// // Field object key of key "myName", and -// // Field is omitted if the field is a zero value for the type. -// Field int `document:"myName,omitempty"` -// -// // Field object key of "Field", and -// // Field is omitted if the field is a zero value for the type. -// Field int `document:",omitempty"` -// -// All struct fields, including anonymous fields, are marshaled unless the -// any of the following conditions are meet. -// -// - the field is not exported -// - document field tag is "-" -// - document field tag specifies "omitempty", and is a zero value. -// -// Pointer and interface values are encoded as the value pointed to or -// contained in the interface. A nil value encodes as a null -// value unless `omitempty` struct tag is provided. -// -// Channel, complex, and function values are not encoded and will be skipped -// when walking the value to be marshaled. -// -// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented -// by your application as a string or numerical representation. -// -// Errors that occur when marshaling will stop the marshaler, and return the error. -// -// Marshal cannot represent cyclic data structures and will not handle them. -// Passing cyclic structures to Marshal will result in an infinite recursion. -type Marshaler interface { - MarshalSmithyDocument() ([]byte, error) -} - -// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and -// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be -// returned. -// -// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document` -// struct field tag for controlling how struct fields are unmarshaled. -// -// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document -// into an empty interface the Unmarshaler will store one of these values: -// bool, for boolean values -// document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float) -// string, for string values -// []interface{}, for array values -// map[string]interface{}, for objects -// nil, for null values -// -// When unmarshaling, any error that occurs will halt the unmarshal and return the error. -type Unmarshaler interface { - UnmarshalSmithyDocument(v interface{}) error -} - -type noSerde interface { - noSmithyDocumentSerde() -} - -// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled -// into a protocol document. -type NoSerde struct{} - -func (n NoSerde) noSmithyDocumentSerde() {} - -var _ noSerde = (*NoSerde)(nil) - -// IsNoSerde returns whether the given type implements the no smithy document serde interface. -func IsNoSerde(x interface{}) bool { - _, ok := x.(noSerde) - return ok -} - -// Number is an arbitrary precision numerical value -type Number string - -// Int64 returns the number as a string. -func (n Number) String() string { - return string(n) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return n.intOfBitSize(64) -} - -func (n Number) intOfBitSize(bitSize int) (int64, error) { - return strconv.ParseInt(string(n), 10, bitSize) -} - -// Uint64 returns the number as a uint64. -func (n Number) Uint64() (uint64, error) { - return n.uintOfBitSize(64) -} - -func (n Number) uintOfBitSize(bitSize int) (uint64, error) { - return strconv.ParseUint(string(n), 10, bitSize) -} - -// Float32 returns the number parsed as a 32-bit float, returns a float64. -func (n Number) Float32() (float64, error) { - return n.floatOfBitSize(32) -} - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return n.floatOfBitSize(64) -} - -// Float64 returns the number as a float64. -func (n Number) floatOfBitSize(bitSize int) (float64, error) { - return strconv.ParseFloat(string(n), bitSize) -} - -// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails. -func (n Number) BigFloat() (*big.Float, error) { - f, ok := (&big.Float{}).SetString(string(n)) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Float") - } - return f, nil -} - -// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails. -func (n Number) BigInt() (*big.Int, error) { - f, ok := (&big.Int{}).SetString(string(n), 10) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Float") - } - return f, nil -} diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go deleted file mode 100644 index 046a7a765318..000000000000 --- a/vendor/github.com/aws/smithy-go/document/errors.go +++ /dev/null @@ -1,75 +0,0 @@ -package document - -import ( - "fmt" - "reflect" -) - -// UnmarshalTypeError is an error type representing an error -// unmarshaling a Smithy document to a Go value type. This is different -// from UnmarshalError in that it does not wrap an underlying error type. -type UnmarshalTypeError struct { - Value string - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalTypeError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s", - e.Value, e.Type.String()) -} - -// An InvalidUnmarshalError is an error type representing an invalid type -// encountered while unmarshaling a Smithy document to a Go value type. -type InvalidUnmarshalError struct { - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *InvalidUnmarshalError) Error() string { - var msg string - if e.Type == nil { - msg = "cannot unmarshal to nil value" - } else if e.Type.Kind() != reflect.Ptr { - msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String()) - } else { - msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String()) - } - - return fmt.Sprintf("unmarshal failed, %s", msg) -} - -// An UnmarshalError wraps an error that occurred while unmarshaling a -// Smithy document into a Go type. This is different from -// UnmarshalTypeError in that it wraps the underlying error that occurred. -type UnmarshalError struct { - Err error - Value string - Type reflect.Type -} - -// Unwrap returns the underlying unmarshaling error -func (e *UnmarshalError) Unwrap() error { - return e.Err -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v", - e.Value, e.Type.String(), e.Err) -} - -// An InvalidMarshalError is an error type representing an error -// occurring when marshaling a Go value type. -type InvalidMarshalError struct { - Message string -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *InvalidMarshalError) Error() string { - return fmt.Sprintf("marshal failed, %s", e.Message) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go deleted file mode 100644 index 792fdfa08b39..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package encoding provides utilities for encoding values for specific -// document encodings. - -package encoding diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go deleted file mode 100644 index 2fdfb5225027..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/encoding.go +++ /dev/null @@ -1,40 +0,0 @@ -package encoding - -import ( - "fmt" - "math" - "strconv" -) - -// EncodeFloat encodes a float value as per the stdlib encoder for json and xml protocol -// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers -// -// Based on encoding/json floatEncoder from the Go Standard Library -// https://golang.org/src/encoding/json/encode.go -func EncodeFloat(dst []byte, v float64, bits int) []byte { - if math.IsInf(v, 0) || math.IsNaN(v) { - panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits))) - } - - abs := math.Abs(v) - fmt := byte('f') - - if abs != 0 { - if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { - fmt = 'e' - } - } - - dst = strconv.AppendFloat(dst, v, fmt, -1, bits) - - if fmt == 'e' { - // clean up e-09 to e-9 - n := len(dst) - if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { - dst[n-2] = dst[n-1] - dst = dst[:n-1] - } - } - - return dst -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go deleted file mode 100644 index 543e7cf03871..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go +++ /dev/null @@ -1,123 +0,0 @@ -package httpbinding - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -const ( - contentLengthHeader = "Content-Length" - floatNaN = "NaN" - floatInfinity = "Infinity" - floatNegInfinity = "-Infinity" -) - -// An Encoder provides encoding of REST URI path, query, and header components -// of an HTTP request. Can also encode a stream as the payload. -// -// Does not support SetFields. -type Encoder struct { - path, rawPath, pathBuffer []byte - - query url.Values - header http.Header -} - -// NewEncoder creates a new encoder from the passed in request. It assumes that -// raw path contains no valuable information at this point, so it passes in path -// as path and raw path for subsequent trans -func NewEncoder(path, query string, headers http.Header) (*Encoder, error) { - return NewEncoderWithRawPath(path, path, query, headers) -} - -// NewHTTPBindingEncoder creates a new encoder from the passed in request. All query and -// header values will be added on top of the request's existing values. Overwriting -// duplicate values. -func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) { - parseQuery, err := url.ParseQuery(query) - if err != nil { - return nil, fmt.Errorf("failed to parse query string: %w", err) - } - - e := &Encoder{ - path: []byte(path), - rawPath: []byte(rawPath), - query: parseQuery, - header: headers.Clone(), - } - - return e, nil -} - -// Encode returns a REST protocol encoder for encoding HTTP bindings. -// -// Due net/http requiring `Content-Length` to be specified on the http.Request#ContentLength directly. Encode -// will look for whether the header is present, and if so will remove it and set the respective value on http.Request. -// -// Returns any error occurring during encoding. -func (e *Encoder) Encode(req *http.Request) (*http.Request, error) { - req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath) - req.URL.RawQuery = e.query.Encode() - - // net/http ignores Content-Length header and requires it to be set on http.Request - if v := e.header.Get(contentLengthHeader); len(v) > 0 { - iv, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, err - } - req.ContentLength = iv - e.header.Del(contentLengthHeader) - } - - req.Header = e.header - - return req, nil -} - -// AddHeader returns a HeaderValue for appending to the given header name -func (e *Encoder) AddHeader(key string) HeaderValue { - return newHeaderValue(e.header, key, true) -} - -// SetHeader returns a HeaderValue for setting the given header name -func (e *Encoder) SetHeader(key string) HeaderValue { - return newHeaderValue(e.header, key, false) -} - -// Headers returns a Header used for encoding headers with the given prefix -func (e *Encoder) Headers(prefix string) Headers { - return Headers{ - header: e.header, - prefix: strings.TrimSpace(prefix), - } -} - -// HasHeader returns if a header with the key specified exists with one or -// more value. -func (e Encoder) HasHeader(key string) bool { - return len(e.header[key]) != 0 -} - -// SetURI returns a URIValue used for setting the given path key -func (e *Encoder) SetURI(key string) URIValue { - return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key) -} - -// SetQuery returns a QueryValue used for setting the given query key -func (e *Encoder) SetQuery(key string) QueryValue { - return NewQueryValue(e.query, key, false) -} - -// AddQuery returns a QueryValue used for appending the given query key -func (e *Encoder) AddQuery(key string) QueryValue { - return NewQueryValue(e.query, key, true) -} - -// HasQuery returns if a query with the key specified exists with one or -// more values. -func (e *Encoder) HasQuery(key string) bool { - return len(e.query.Get(key)) != 0 -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go deleted file mode 100644 index f9256e175fc9..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go +++ /dev/null @@ -1,122 +0,0 @@ -package httpbinding - -import ( - "encoding/base64" - "math" - "math/big" - "net/http" - "strconv" - "strings" -) - -// Headers is used to encode header keys using a provided prefix -type Headers struct { - header http.Header - prefix string -} - -// AddHeader returns a HeaderValue used to append values to prefix+key -func (h Headers) AddHeader(key string) HeaderValue { - return h.newHeaderValue(key, true) -} - -// SetHeader returns a HeaderValue used to set the value of prefix+key -func (h Headers) SetHeader(key string) HeaderValue { - return h.newHeaderValue(key, false) -} - -func (h Headers) newHeaderValue(key string, append bool) HeaderValue { - return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append) -} - -// HeaderValue is used to encode values to an HTTP header -type HeaderValue struct { - header http.Header - key string - append bool -} - -func newHeaderValue(header http.Header, key string, append bool) HeaderValue { - return HeaderValue{header: header, key: strings.TrimSpace(key), append: append} -} - -func (h HeaderValue) modifyHeader(value string) { - if h.append { - h.header[h.key] = append(h.header[h.key], value) - } else { - h.header[h.key] = append(h.header[h.key][:0], value) - } -} - -// String encodes the value v as the header string value -func (h HeaderValue) String(v string) { - h.modifyHeader(v) -} - -// Byte encodes the value v as a query string value -func (h HeaderValue) Byte(v int8) { - h.Long(int64(v)) -} - -// Short encodes the value v as a query string value -func (h HeaderValue) Short(v int16) { - h.Long(int64(v)) -} - -// Integer encodes the value v as the header string value -func (h HeaderValue) Integer(v int32) { - h.Long(int64(v)) -} - -// Long encodes the value v as the header string value -func (h HeaderValue) Long(v int64) { - h.modifyHeader(strconv.FormatInt(v, 10)) -} - -// Boolean encodes the value v as a query string value -func (h HeaderValue) Boolean(v bool) { - h.modifyHeader(strconv.FormatBool(v)) -} - -// Float encodes the value v as a query string value -func (h HeaderValue) Float(v float32) { - h.float(float64(v), 32) -} - -// Double encodes the value v as a query string value -func (h HeaderValue) Double(v float64) { - h.float(v, 64) -} - -func (h HeaderValue) float(v float64, bitSize int) { - switch { - case math.IsNaN(v): - h.String(floatNaN) - case math.IsInf(v, 1): - h.String(floatInfinity) - case math.IsInf(v, -1): - h.String(floatNegInfinity) - default: - h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes the value v as a query string value -func (h HeaderValue) BigInteger(v *big.Int) { - h.modifyHeader(v.String()) -} - -// BigDecimal encodes the value v as a query string value -func (h HeaderValue) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - h.Long(i) - return - } - h.modifyHeader(v.Text('e', -1)) -} - -// Blob encodes the value v as a base64 header string value -func (h HeaderValue) Blob(v []byte) { - encodeToString := base64.StdEncoding.EncodeToString(v) - h.modifyHeader(encodeToString) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go deleted file mode 100644 index 9ae308540cbc..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go +++ /dev/null @@ -1,108 +0,0 @@ -package httpbinding - -import ( - "bytes" - "fmt" -) - -const ( - uriTokenStart = '{' - uriTokenStop = '}' - uriTokenSkip = '+' -) - -func bufCap(b []byte, n int) []byte { - if cap(b) < n { - return make([]byte, 0, n) - } - - return b[0:0] -} - -// replacePathElement replaces a single element in the path []byte. -// Escape is used to control whether the value will be escaped using Amazon path escape style. -func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { - // search for "{}". If not found, search for the greedy version "{+}". If none are found, return error - fieldBuf = bufCap(fieldBuf, len(key)+2) // { } - fieldBuf = append(fieldBuf, uriTokenStart) - fieldBuf = append(fieldBuf, key...) - fieldBuf = append(fieldBuf, uriTokenStop) - - start := bytes.Index(path, fieldBuf) - encodeSep := true - if start < 0 { - fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } - fieldBuf = append(fieldBuf, uriTokenStart) - fieldBuf = append(fieldBuf, key...) - fieldBuf = append(fieldBuf, uriTokenSkip) - fieldBuf = append(fieldBuf, uriTokenStop) - - start = bytes.Index(path, fieldBuf) - if start < 0 { - return path, fieldBuf, fmt.Errorf("invalid path index, start=%d. %s", start, path) - } - encodeSep = false - } - end := start + len(fieldBuf) - - if escape { - val = EscapePath(val, encodeSep) - } - - fieldBuf = bufCap(fieldBuf, len(val)) - fieldBuf = append(fieldBuf, val...) - - keyLen := end - start - valLen := len(fieldBuf) - - if keyLen == valLen { - copy(path[start:], fieldBuf) - return path, fieldBuf, nil - } - - newLen := len(path) + (valLen - keyLen) - if len(path) < newLen { - path = path[:cap(path)] - } - if cap(path) < newLen { - newURI := make([]byte, newLen) - copy(newURI, path) - path = newURI - } - - // shift - copy(path[start+valLen:], path[end:]) - path = path[:newLen] - copy(path[start:], fieldBuf) - - return path, fieldBuf, nil -} - -// EscapePath escapes part of a URL path in Amazon style. -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -var noEscape [256]bool - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' || - i == '_' || - i == '~' - } -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go deleted file mode 100644 index c2e7d0a20f45..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go +++ /dev/null @@ -1,107 +0,0 @@ -package httpbinding - -import ( - "encoding/base64" - "math" - "math/big" - "net/url" - "strconv" -) - -// QueryValue is used to encode query key values -type QueryValue struct { - query url.Values - key string - append bool -} - -// NewQueryValue creates a new QueryValue which enables encoding -// a query value into the given url.Values. -func NewQueryValue(query url.Values, key string, append bool) QueryValue { - return QueryValue{ - query: query, - key: key, - append: append, - } -} - -func (qv QueryValue) updateKey(value string) { - if qv.append { - qv.query.Add(qv.key, value) - } else { - qv.query.Set(qv.key, value) - } -} - -// Blob encodes v as a base64 query string value -func (qv QueryValue) Blob(v []byte) { - encodeToString := base64.StdEncoding.EncodeToString(v) - qv.updateKey(encodeToString) -} - -// Boolean encodes v as a query string value -func (qv QueryValue) Boolean(v bool) { - qv.updateKey(strconv.FormatBool(v)) -} - -// String encodes v as a query string value -func (qv QueryValue) String(v string) { - qv.updateKey(v) -} - -// Byte encodes v as a query string value -func (qv QueryValue) Byte(v int8) { - qv.Long(int64(v)) -} - -// Short encodes v as a query string value -func (qv QueryValue) Short(v int16) { - qv.Long(int64(v)) -} - -// Integer encodes v as a query string value -func (qv QueryValue) Integer(v int32) { - qv.Long(int64(v)) -} - -// Long encodes v as a query string value -func (qv QueryValue) Long(v int64) { - qv.updateKey(strconv.FormatInt(v, 10)) -} - -// Float encodes v as a query string value -func (qv QueryValue) Float(v float32) { - qv.float(float64(v), 32) -} - -// Double encodes v as a query string value -func (qv QueryValue) Double(v float64) { - qv.float(v, 64) -} - -func (qv QueryValue) float(v float64, bitSize int) { - switch { - case math.IsNaN(v): - qv.String(floatNaN) - case math.IsInf(v, 1): - qv.String(floatInfinity) - case math.IsInf(v, -1): - qv.String(floatNegInfinity) - default: - qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes v as a query string value -func (qv QueryValue) BigInteger(v *big.Int) { - qv.updateKey(v.String()) -} - -// BigDecimal encodes v as a query string value -func (qv QueryValue) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - qv.Long(i) - return - } - qv.updateKey(v.Text('e', -1)) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go deleted file mode 100644 index f04e11984ac3..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go +++ /dev/null @@ -1,111 +0,0 @@ -package httpbinding - -import ( - "math" - "math/big" - "strconv" - "strings" -) - -// URIValue is used to encode named URI parameters -type URIValue struct { - path, rawPath, buffer *[]byte - - key string -} - -func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue { - return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key} -} - -func (u URIValue) modifyURI(value string) (err error) { - *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) - if err != nil { - return err - } - *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) - return err -} - -// Boolean encodes v as a URI string value -func (u URIValue) Boolean(v bool) error { - return u.modifyURI(strconv.FormatBool(v)) -} - -// String encodes v as a URI string value -func (u URIValue) String(v string) error { - return u.modifyURI(v) -} - -// Byte encodes v as a URI string value -func (u URIValue) Byte(v int8) error { - return u.Long(int64(v)) -} - -// Short encodes v as a URI string value -func (u URIValue) Short(v int16) error { - return u.Long(int64(v)) -} - -// Integer encodes v as a URI string value -func (u URIValue) Integer(v int32) error { - return u.Long(int64(v)) -} - -// Long encodes v as a URI string value -func (u URIValue) Long(v int64) error { - return u.modifyURI(strconv.FormatInt(v, 10)) -} - -// Float encodes v as a query string value -func (u URIValue) Float(v float32) error { - return u.float(float64(v), 32) -} - -// Double encodes v as a query string value -func (u URIValue) Double(v float64) error { - return u.float(v, 64) -} - -func (u URIValue) float(v float64, bitSize int) error { - switch { - case math.IsNaN(v): - return u.String(floatNaN) - case math.IsInf(v, 1): - return u.String(floatInfinity) - case math.IsInf(v, -1): - return u.String(floatNegInfinity) - default: - return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes v as a query string value -func (u URIValue) BigInteger(v *big.Int) error { - return u.modifyURI(v.String()) -} - -// BigDecimal encodes v as a query string value -func (u URIValue) BigDecimal(v *big.Float) error { - if i, accuracy := v.Int64(); accuracy == big.Exact { - return u.Long(i) - } - return u.modifyURI(v.Text('e', -1)) -} - -// SplitURI parses a Smithy HTTP binding trait URI -func SplitURI(uri string) (path, query string) { - queryStart := strings.IndexRune(uri, '?') - if queryStart == -1 { - path = uri - return path, query - } - - path = uri[:queryStart] - if queryStart+1 >= len(uri) { - return path, query - } - query = uri[queryStart+1:] - - return path, query -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go deleted file mode 100644 index 7a232f660f19..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/array.go +++ /dev/null @@ -1,35 +0,0 @@ -package json - -import ( - "bytes" -) - -// Array represents the encoding of a JSON Array -type Array struct { - w *bytes.Buffer - writeComma bool - scratch *[]byte -} - -func newArray(w *bytes.Buffer, scratch *[]byte) *Array { - w.WriteRune(leftBracket) - return &Array{w: w, scratch: scratch} -} - -// Value adds a new element to the JSON Array. -// Returns a Value type that is used to encode -// the array element. -func (a *Array) Value() Value { - if a.writeComma { - a.w.WriteRune(comma) - } else { - a.writeComma = true - } - - return newValue(a.w, a.scratch) -} - -// Close encodes the end of the JSON Array -func (a *Array) Close() { - a.w.WriteRune(rightBracket) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go deleted file mode 100644 index 91044092aef6..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -package json - -const ( - leftBrace = '{' - rightBrace = '}' - - leftBracket = '[' - rightBracket = ']' - - comma = ',' - quote = '"' - colon = ':' - - null = "null" -) diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go deleted file mode 100644 index 7050c85b3c61..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go +++ /dev/null @@ -1,139 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// DiscardUnknownField discards unknown fields from a decoder body. -// This function is useful while deserializing a JSON body with additional -// unknown information that should be discarded. -func DiscardUnknownField(decoder *json.Decoder) error { - // This deliberately does not share logic with CollectUnknownField, even - // though it could, because if we were to delegate to that then we'd incur - // extra allocations and general memory usage. - v, err := decoder.Token() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - if _, ok := v.(json.Delim); ok { - for decoder.More() { - err = DiscardUnknownField(decoder) - } - endToken, err := decoder.Token() - if err != nil { - return err - } - if _, ok := endToken.(json.Delim); !ok { - return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v", - endToken, endToken) - } - } - - return nil -} - -// CollectUnknownField grabs the contents of unknown fields from the decoder body -// and returns them as a byte slice. This is useful for skipping unknown fields without -// completely discarding them. -func CollectUnknownField(decoder *json.Decoder) ([]byte, error) { - result, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - buff := bytes.NewBuffer(nil) - encoder := json.NewEncoder(buff) - - if err := encoder.Encode(result); err != nil { - return nil, err - } - - return buff.Bytes(), nil -} - -func collectUnknownField(decoder *json.Decoder) (interface{}, error) { - // Grab the initial value. This could either be a concrete value like a string or a a - // delimiter. - token, err := decoder.Token() - if err == io.EOF { - return nil, nil - } - if err != nil { - return nil, err - } - - // If it's an array or object, we'll need to recurse. - delim, ok := token.(json.Delim) - if ok { - var result interface{} - if delim == '{' { - result, err = collectUnknownObject(decoder) - if err != nil { - return nil, err - } - } else { - result, err = collectUnknownArray(decoder) - if err != nil { - return nil, err - } - } - - // Discard the closing token. decoder.Token handles checking for matching delimiters - if _, err := decoder.Token(); err != nil { - return nil, err - } - return result, nil - } - - return token, nil -} - -func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) { - // We need to create an empty array here instead of a nil array, since by getting - // into this function at all we necessarily have seen a non-nil list. - array := []interface{}{} - - for decoder.More() { - value, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - array = append(array, value) - } - - return array, nil -} - -func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) { - object := make(map[string]interface{}) - - for decoder.More() { - key, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - // Keys have to be strings, which is particularly important as the encoder - // won't except a map with interface{} keys - stringKey, ok := key.(string) - if !ok { - return nil, fmt.Errorf("expected string key, found %T", key) - } - - value, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - object[stringKey] = value - } - - return object, nil -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go deleted file mode 100644 index 8772953f1e6f..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go +++ /dev/null @@ -1,30 +0,0 @@ -package json - -import ( - "bytes" -) - -// Encoder is JSON encoder that supports construction of JSON values -// using methods. -type Encoder struct { - w *bytes.Buffer - Value -} - -// NewEncoder returns a new JSON encoder -func NewEncoder() *Encoder { - writer := bytes.NewBuffer(nil) - scratch := make([]byte, 64) - - return &Encoder{w: writer, Value: newValue(writer, &scratch)} -} - -// String returns the String output of the JSON encoder -func (e Encoder) String() string { - return e.w.String() -} - -// Bytes returns the []byte slice of the JSON encoder -func (e Encoder) Bytes() []byte { - return e.w.Bytes() -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go deleted file mode 100644 index d984d0cdca12..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/escape.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet - -package json - -import ( - "bytes" - "unicode/utf8" -) - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). -var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// copied from Go 1.8 stdlib's encoding/json/#hex -var hex = "0123456789abcdef" - -// escapeStringBytes escapes and writes the passed in string bytes to the dst -// buffer -// -// Copied and modifed from Go 1.8 stdlib's encodeing/json/#encodeState.stringBytes -func escapeStringBytes(e *bytes.Buffer, s []byte) { - e.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if safeSet[b] { - i++ - continue - } - if start < i { - e.Write(s[start:i]) - } - switch b { - case '\\', '"': - e.WriteByte('\\') - e.WriteByte(b) - case '\n': - e.WriteByte('\\') - e.WriteByte('n') - case '\r': - e.WriteByte('\\') - e.WriteByte('r') - case '\t': - e.WriteByte('\\') - e.WriteByte('t') - default: - // This encodes bytes < 0x20 except for \t, \n and \r. - // If escapeHTML is set, it also escapes <, >, and & - // because they can lead to security holes when - // user-controlled strings are rendered into JSON - // and served to some browsers. - e.WriteString(`\u00`) - e.WriteByte(hex[b>>4]) - e.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\u202`) - e.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - e.Write(s[start:]) - } - e.WriteByte('"') -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go deleted file mode 100644 index 722346d0358b..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/object.go +++ /dev/null @@ -1,40 +0,0 @@ -package json - -import ( - "bytes" -) - -// Object represents the encoding of a JSON Object type -type Object struct { - w *bytes.Buffer - writeComma bool - scratch *[]byte -} - -func newObject(w *bytes.Buffer, scratch *[]byte) *Object { - w.WriteRune(leftBrace) - return &Object{w: w, scratch: scratch} -} - -func (o *Object) writeKey(key string) { - escapeStringBytes(o.w, []byte(key)) - o.w.WriteRune(colon) -} - -// Key adds the given named key to the JSON object. -// Returns a Value encoder that should be used to encode -// a JSON value type. -func (o *Object) Key(name string) Value { - if o.writeComma { - o.w.WriteRune(comma) - } else { - o.writeComma = true - } - o.writeKey(name) - return newValue(o.w, o.scratch) -} - -// Close encodes the end of the JSON Object -func (o *Object) Close() { - o.w.WriteRune(rightBrace) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go deleted file mode 100644 index b41ff1e15c2c..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/value.go +++ /dev/null @@ -1,149 +0,0 @@ -package json - -import ( - "bytes" - "encoding/base64" - "math/big" - "strconv" - - "github.com/aws/smithy-go/encoding" -) - -// Value represents a JSON Value type -// JSON Value types: Object, Array, String, Number, Boolean, and Null -type Value struct { - w *bytes.Buffer - scratch *[]byte -} - -// newValue returns a new Value encoder -func newValue(w *bytes.Buffer, scratch *[]byte) Value { - return Value{w: w, scratch: scratch} -} - -// String encodes v as a JSON string -func (jv Value) String(v string) { - escapeStringBytes(jv.w, []byte(v)) -} - -// Byte encodes v as a JSON number -func (jv Value) Byte(v int8) { - jv.Long(int64(v)) -} - -// Short encodes v as a JSON number -func (jv Value) Short(v int16) { - jv.Long(int64(v)) -} - -// Integer encodes v as a JSON number -func (jv Value) Integer(v int32) { - jv.Long(int64(v)) -} - -// Long encodes v as a JSON number -func (jv Value) Long(v int64) { - *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10) - jv.w.Write(*jv.scratch) -} - -// ULong encodes v as a JSON number -func (jv Value) ULong(v uint64) { - *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10) - jv.w.Write(*jv.scratch) -} - -// Float encodes v as a JSON number -func (jv Value) Float(v float32) { - jv.float(float64(v), 32) -} - -// Double encodes v as a JSON number -func (jv Value) Double(v float64) { - jv.float(v, 64) -} - -func (jv Value) float(v float64, bits int) { - *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits) - jv.w.Write(*jv.scratch) -} - -// Boolean encodes v as a JSON boolean -func (jv Value) Boolean(v bool) { - *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v) - jv.w.Write(*jv.scratch) -} - -// Base64EncodeBytes writes v as a base64 value in JSON string -func (jv Value) Base64EncodeBytes(v []byte) { - encodeByteSlice(jv.w, (*jv.scratch)[:0], v) -} - -// Write writes v directly to the JSON document -func (jv Value) Write(v []byte) { - jv.w.Write(v) -} - -// Array returns a new Array encoder -func (jv Value) Array() *Array { - return newArray(jv.w, jv.scratch) -} - -// Object returns a new Object encoder -func (jv Value) Object() *Object { - return newObject(jv.w, jv.scratch) -} - -// Null encodes a null JSON value -func (jv Value) Null() { - jv.w.WriteString(null) -} - -// BigInteger encodes v as JSON value -func (jv Value) BigInteger(v *big.Int) { - jv.w.Write([]byte(v.Text(10))) -} - -// BigDecimal encodes v as JSON value -func (jv Value) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - jv.Long(i) - return - } - // TODO: Should this try to match ES6 ToString similar to stdlib JSON? - jv.w.Write([]byte(v.Text('e', -1))) -} - -// Based on encoding/json encodeByteSlice from the Go Standard Library -// https://golang.org/src/encoding/json/encode.go -func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) { - if v == nil { - w.WriteString(null) - return - } - - w.WriteRune(quote) - - encodedLen := base64.StdEncoding.EncodedLen(len(v)) - if encodedLen <= len(scratch) { - // If the encoded bytes fit in e.scratch, avoid an extra - // allocation and use the cheaper Encoding.Encode. - dst := scratch[:encodedLen] - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else if encodedLen <= 1024 { - // The encoded bytes are short enough to allocate for, and - // Encoding.Encode is still cheaper. - dst := make([]byte, encodedLen) - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else { - // The encoded bytes are too long to cheaply allocate, and - // Encoding.Encode is no longer noticeably cheaper. - enc := base64.NewEncoder(base64.StdEncoding, w) - enc.Write(v) - enc.Close() - } - - w.WriteRune(quote) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go deleted file mode 100644 index 508f3c997ec5..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/array.go +++ /dev/null @@ -1,49 +0,0 @@ -package xml - -// arrayMemberWrapper is the default member wrapper tag name for XML Array type -var arrayMemberWrapper = StartElement{ - Name: Name{Local: "member"}, -} - -// Array represents the encoding of a XML array type -type Array struct { - w writer - scratch *[]byte - - // member start element is the array member wrapper start element - memberStartElement StartElement - - // isFlattened indicates if the array is a flattened array. - isFlattened bool -} - -// newArray returns an array encoder. -// It also takes in the member start element, array start element. -// It takes in a isFlattened bool, indicating that an array is flattened array. -// -// A wrapped array ["value1", "value2"] is represented as -// `value1value2`. - -// A flattened array `someList: ["value1", "value2"]` is represented as -// `value1value2`. -func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array { - var memberWrapper = memberStartElement - if isFlattened { - memberWrapper = arrayStartElement - } - - return &Array{ - w: w, - scratch: scratch, - memberStartElement: memberWrapper, - isFlattened: isFlattened, - } -} - -// Member adds a new member to the XML array. -// It returns a Value encoder. -func (a *Array) Member() Value { - v := newValue(a.w, a.scratch, a.memberStartElement) - v.isFlattened = a.isFlattened - return v -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go deleted file mode 100644 index ccee90a636bb..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go +++ /dev/null @@ -1,10 +0,0 @@ -package xml - -const ( - leftAngleBracket = '<' - rightAngleBracket = '>' - forwardSlash = '/' - colon = ':' - equals = '=' - quote = '"' -) diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go deleted file mode 100644 index f9200093e879..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Package xml holds the XMl encoder utility. This utility is written in accordance to our design to delegate to -shape serializer function in which a xml.Value will be passed around. - -Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings - -Member Element - -Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member element -write their own element start tag. These elements should always be closed. - -Flattened Element - -Flattened element should be used to encode shapes marked with flattened trait into xml elements. Flattened element -do not write a start tag, and thus should not be closed. - -Simple types encoding - -All simple type methods on value such as String(), Long() etc; auto close the associated member element. - -Array - -Array returns the collection encoder. It has two modes, wrapped and flattened encoding. - -Wrapped arrays have two methods Array() and ArrayWithCustomName() which facilitate array member wrapping. -By default, a wrapped array members are wrapped with `member` named start element. - - appletree - -Flattened arrays rely on Value being marked as flattened. -If a shape is marked as flattened, Array() will use the shape element name as wrapper for array elements. - - appletree - -Map - -Map is the map encoder. It has two modes, wrapped and flattened encoding. - -Wrapped map has Array() method, which facilitate map member wrapping. -By default, a wrapped map members are wrapped with `entry` named start element. - - appletreesnowice - -Flattened map rely on Value being marked as flattened. -If a shape is marked as flattened, Map() will use the shape element name as wrapper for map entry elements. - - appletreesnowice -*/ -package xml diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go deleted file mode 100644 index ae84e7999edb..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/element.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.14 stdlib's encoding/xml - -package xml - -// A Name represents an XML name (Local) annotated -// with a name space identifier (Space). -// In tokens returned by Decoder.Token, the Space identifier -// is given as a canonical URL, not the short prefix used -// in the document being parsed. -type Name struct { - Space, Local string -} - -// An Attr represents an attribute in an XML element (Name=Value). -type Attr struct { - Name Name - Value string -} - -/* -NewAttribute returns a pointer to an attribute. -It takes in a local name aka attribute name, and value -representing the attribute value. -*/ -func NewAttribute(local, value string) Attr { - return Attr{ - Name: Name{ - Local: local, - }, - Value: value, - } -} - -/* -NewNamespaceAttribute returns a pointer to an attribute. -It takes in a local name aka attribute name, and value -representing the attribute value. - -NewNamespaceAttribute appends `xmlns:` in front of namespace -prefix. - -For creating a name space attribute representing -`xmlns:prefix="http://example.com`, the breakdown would be: -local = "prefix" -value = "http://example.com" -*/ -func NewNamespaceAttribute(local, value string) Attr { - attr := NewAttribute(local, value) - - // default name space identifier - attr.Name.Space = "xmlns" - return attr -} - -// A StartElement represents an XML start element. -type StartElement struct { - Name Name - Attr []Attr -} - -// Copy creates a new copy of StartElement. -func (e StartElement) Copy() StartElement { - attrs := make([]Attr, len(e.Attr)) - copy(attrs, e.Attr) - e.Attr = attrs - return e -} - -// End returns the corresponding XML end element. -func (e StartElement) End() EndElement { - return EndElement{e.Name} -} - -// returns true if start element local name is empty -func (e StartElement) isZero() bool { - return len(e.Name.Local) == 0 -} - -// An EndElement represents an XML end element. -type EndElement struct { - Name Name -} - -// returns true if end element local name is empty -func (e EndElement) isZero() bool { - return len(e.Name.Local) == 0 -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go deleted file mode 100644 index 16fb3dddb0a8..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go +++ /dev/null @@ -1,51 +0,0 @@ -package xml - -// writer interface used by the xml encoder to write an encoded xml -// document in a writer. -type writer interface { - - // Write takes in a byte slice and returns number of bytes written and error - Write(p []byte) (n int, err error) - - // WriteRune takes in a rune and returns number of bytes written and error - WriteRune(r rune) (n int, err error) - - // WriteString takes in a string and returns number of bytes written and error - WriteString(s string) (n int, err error) - - // String method returns a string - String() string - - // Bytes return a byte slice. - Bytes() []byte -} - -// Encoder is an XML encoder that supports construction of XML values -// using methods. The encoder takes in a writer and maintains a scratch buffer. -type Encoder struct { - w writer - scratch *[]byte -} - -// NewEncoder returns an XML encoder -func NewEncoder(w writer) *Encoder { - scratch := make([]byte, 64) - - return &Encoder{w: w, scratch: &scratch} -} - -// String returns the string output of the XML encoder -func (e Encoder) String() string { - return e.w.String() -} - -// Bytes returns the []byte slice of the XML encoder -func (e Encoder) Bytes() []byte { - return e.w.Bytes() -} - -// RootElement builds a root element encoding -// It writes it's start element tag. The value should be closed. -func (e Encoder) RootElement(element StartElement) Value { - return newValue(e.w, e.scratch, element) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go deleted file mode 100644 index f3db6ccca85c..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go +++ /dev/null @@ -1,51 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "io" -) - -// ErrorComponents represents the error response fields -// that will be deserialized from an xml error response body -type ErrorComponents struct { - Code string - Message string -} - -// GetErrorResponseComponents returns the error fields from an xml error response body -func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { - if noErrorWrapping { - var errResponse noWrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - }, nil - } - - var errResponse wrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents{ - Code: errResponse.Code, - Message: errResponse.Message, - }, nil -} - -// noWrappedErrorResponse represents the error response body with -// no internal ... -type wrappedErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go deleted file mode 100644 index 1c5479af677d..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.14 stdlib's encoding/xml - -package xml - -import ( - "unicode/utf8" -) - -// Copied from Go 1.14 stdlib's encoding/xml -var ( - escQuot = []byte(""") // shorter than """ - escApos = []byte("'") // shorter than "'" - escAmp = []byte("&") - escLT = []byte("<") - escGT = []byte(">") - escTab = []byte(" ") - escNL = []byte(" ") - escCR = []byte(" ") - escFFFD = []byte("\uFFFD") // Unicode replacement character - - // Additional Escapes - escNextLine = []byte("…") - escLS = []byte("
") -) - -// Decide whether the given rune is in the XML Character Range, per -// the Char production of https://www.xml.com/axml/testaxml.htm, -// Section 2.2 Characters. -func isInCharacterRange(r rune) (inrange bool) { - return r == 0x09 || - r == 0x0A || - r == 0x0D || - r >= 0x20 && r <= 0xD7FF || - r >= 0xE000 && r <= 0xFFFD || - r >= 0x10000 && r <= 0x10FFFF -} - -// TODO: When do we need to escape the string? -// Based on encoding/xml escapeString from the Go Standard Library. -// https://golang.org/src/encoding/xml/xml.go -func escapeString(e writer, s string) { - var esc []byte - last := 0 - for i := 0; i < len(s); { - r, width := utf8.DecodeRuneInString(s[i:]) - i += width - switch r { - case '"': - esc = escQuot - case '\'': - esc = escApos - case '&': - esc = escAmp - case '<': - esc = escLT - case '>': - esc = escGT - case '\t': - esc = escTab - case '\n': - esc = escNL - case '\r': - esc = escCR - case '\u0085': - // Not escaped by stdlib - esc = escNextLine - case '\u2028': - // Not escaped by stdlib - esc = escLS - default: - if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = escFFFD - break - } - continue - } - e.WriteString(s[last : i-width]) - e.Write(esc) - last = i - } - e.WriteString(s[last:]) -} - -// escapeText writes to w the properly escaped XML equivalent -// of the plain text data s. If escapeNewline is true, newline -// characters will be escaped. -// -// Based on encoding/xml escapeText from the Go Standard Library. -// https://golang.org/src/encoding/xml/xml.go -func escapeText(e writer, s []byte) { - var esc []byte - last := 0 - for i := 0; i < len(s); { - r, width := utf8.DecodeRune(s[i:]) - i += width - switch r { - case '"': - esc = escQuot - case '\'': - esc = escApos - case '&': - esc = escAmp - case '<': - esc = escLT - case '>': - esc = escGT - case '\t': - esc = escTab - case '\n': - // This always escapes newline, which is different than stdlib's optional - // escape of new line. - esc = escNL - case '\r': - esc = escCR - case '\u0085': - // Not escaped by stdlib - esc = escNextLine - case '\u2028': - // Not escaped by stdlib - esc = escLS - default: - if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = escFFFD - break - } - continue - } - e.Write(s[last : i-width]) - e.Write(esc) - last = i - } - e.Write(s[last:]) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go deleted file mode 100644 index e42858965ccc..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/map.go +++ /dev/null @@ -1,53 +0,0 @@ -package xml - -// mapEntryWrapper is the default member wrapper start element for XML Map entry -var mapEntryWrapper = StartElement{ - Name: Name{Local: "entry"}, -} - -// Map represents the encoding of a XML map type -type Map struct { - w writer - scratch *[]byte - - // member start element is the map entry wrapper start element - memberStartElement StartElement - - // isFlattened returns true if the map is a flattened map - isFlattened bool -} - -// newMap returns a map encoder which sets the default map -// entry wrapper to `entry`. -// -// A map `someMap : {{key:"abc", value:"123"}}` is represented as -// `abc123`. -func newMap(w writer, scratch *[]byte) *Map { - return &Map{ - w: w, - scratch: scratch, - memberStartElement: mapEntryWrapper, - } -} - -// newFlattenedMap returns a map encoder which sets the map -// entry wrapper to the passed in memberWrapper`. -// -// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as -// `abc123`. -func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map { - return &Map{ - w: w, - scratch: scratch, - memberStartElement: memberWrapper, - isFlattened: true, - } -} - -// Entry returns a Value encoder with map's element. -// It writes the member wrapper start tag for each entry. -func (m *Map) Entry() Value { - v := newValue(m.w, m.scratch, m.memberStartElement) - v.isFlattened = m.isFlattened - return v -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go deleted file mode 100644 index 09434b2c0b55..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/value.go +++ /dev/null @@ -1,302 +0,0 @@ -package xml - -import ( - "encoding/base64" - "fmt" - "math/big" - "strconv" - - "github.com/aws/smithy-go/encoding" -) - -// Value represents an XML Value type -// XML Value types: Object, Array, Map, String, Number, Boolean. -type Value struct { - w writer - scratch *[]byte - - // xml start element is the associated start element for the Value - startElement StartElement - - // indicates if the Value represents a flattened shape - isFlattened bool -} - -// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag -func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value { - return Value{ - w: w, - scratch: scratch, - startElement: startElement, - } -} - -// newValue writes the start element xml tag and returns a Value -func newValue(w writer, scratch *[]byte, startElement StartElement) Value { - writeStartElement(w, startElement) - return Value{w: w, scratch: scratch, startElement: startElement} -} - -// writeStartElement takes in a start element and writes it. -// It handles namespace, attributes in start element. -func writeStartElement(w writer, el StartElement) error { - if el.isZero() { - return fmt.Errorf("xml start element cannot be nil") - } - - w.WriteRune(leftAngleBracket) - - if len(el.Name.Space) != 0 { - escapeString(w, el.Name.Space) - w.WriteRune(colon) - } - escapeString(w, el.Name.Local) - for _, attr := range el.Attr { - w.WriteRune(' ') - writeAttribute(w, &attr) - } - - w.WriteRune(rightAngleBracket) - return nil -} - -// writeAttribute writes an attribute from a provided Attribute -// For a namespace attribute, the attr.Name.Space must be defined as "xmlns". -// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName -func writeAttribute(w writer, attr *Attr) { - // if local, space both are not empty - if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 { - escapeString(w, attr.Name.Space) - w.WriteRune(colon) - } - - // if prefix is empty, the default `xmlns` space should be used as prefix. - if len(attr.Name.Local) == 0 { - attr.Name.Local = attr.Name.Space - } - - escapeString(w, attr.Name.Local) - w.WriteRune(equals) - w.WriteRune(quote) - escapeString(w, attr.Value) - w.WriteRune(quote) -} - -// writeEndElement takes in a end element and writes it. -func writeEndElement(w writer, el EndElement) error { - if el.isZero() { - return fmt.Errorf("xml end element cannot be nil") - } - - w.WriteRune(leftAngleBracket) - w.WriteRune(forwardSlash) - - if len(el.Name.Space) != 0 { - escapeString(w, el.Name.Space) - w.WriteRune(colon) - } - escapeString(w, el.Name.Local) - w.WriteRune(rightAngleBracket) - - return nil -} - -// String encodes v as a XML string. -// It will auto close the parent xml element tag. -func (xv Value) String(v string) { - escapeString(xv.w, v) - xv.Close() -} - -// Byte encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Byte(v int8) { - xv.Long(int64(v)) -} - -// Short encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Short(v int16) { - xv.Long(int64(v)) -} - -// Integer encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Integer(v int32) { - xv.Long(int64(v)) -} - -// Long encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Long(v int64) { - *xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10) - xv.w.Write(*xv.scratch) - - xv.Close() -} - -// Float encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Float(v float32) { - xv.float(float64(v), 32) - xv.Close() -} - -// Double encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Double(v float64) { - xv.float(v, 64) - xv.Close() -} - -func (xv Value) float(v float64, bits int) { - *xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits) - xv.w.Write(*xv.scratch) -} - -// Boolean encodes v as a XML boolean. -// It will auto close the parent xml element tag. -func (xv Value) Boolean(v bool) { - *xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v) - xv.w.Write(*xv.scratch) - - xv.Close() -} - -// Base64EncodeBytes writes v as a base64 value in XML string. -// It will auto close the parent xml element tag. -func (xv Value) Base64EncodeBytes(v []byte) { - encodeByteSlice(xv.w, (*xv.scratch)[:0], v) - xv.Close() -} - -// BigInteger encodes v big.Int as XML value. -// It will auto close the parent xml element tag. -func (xv Value) BigInteger(v *big.Int) { - xv.w.Write([]byte(v.Text(10))) - xv.Close() -} - -// BigDecimal encodes v big.Float as XML value. -// It will auto close the parent xml element tag. -func (xv Value) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - xv.Long(i) - return - } - - xv.w.Write([]byte(v.Text('e', -1))) - xv.Close() -} - -// Write writes v directly to the xml document -// if escapeXMLText is set to true, write will escape text. -// It will auto close the parent xml element tag. -func (xv Value) Write(v []byte, escapeXMLText bool) { - // escape and write xml text - if escapeXMLText { - escapeText(xv.w, v) - } else { - // write xml directly - xv.w.Write(v) - } - - xv.Close() -} - -// MemberElement does member element encoding. It returns a Value. -// Member Element method should be used for all shapes except flattened shapes. -// -// A call to MemberElement will write nested element tags directly using the -// provided start element. The value returned by MemberElement should be closed. -func (xv Value) MemberElement(element StartElement) Value { - return newValue(xv.w, xv.scratch, element) -} - -// FlattenedElement returns flattened element encoding. It returns a Value. -// This method should be used for flattened shapes. -// -// Unlike MemberElement, flattened element will NOT write element tags -// directly for the associated start element. -// -// The value returned by the FlattenedElement does not need to be closed. -func (xv Value) FlattenedElement(element StartElement) Value { - v := newFlattenedValue(xv.w, xv.scratch, element) - v.isFlattened = true - return v -} - -// Array returns an array encoder. By default, the members of array are -// wrapped with `` element tag. -// If value is marked as flattened, the start element is used to wrap the members instead of -// the `` element. -func (xv Value) Array() *Array { - return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened) -} - -/* -ArrayWithCustomName returns an array encoder. - -It takes named start element as an argument, the named start element will used to wrap xml array entries. -for eg, `entry1` -Here `customName` named start element will be wrapped on each array member. -*/ -func (xv Value) ArrayWithCustomName(element StartElement) *Array { - return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened) -} - -/* -Map returns a map encoder. By default, the map entries are -wrapped with `` element tag. - -If value is marked as flattened, the start element is used to wrap the entry instead of -the `` element. -*/ -func (xv Value) Map() *Map { - // flattened map - if xv.isFlattened { - return newFlattenedMap(xv.w, xv.scratch, xv.startElement) - } - - // un-flattened map - return newMap(xv.w, xv.scratch) -} - -// encodeByteSlice is modified copy of json encoder's encodeByteSlice. -// It is used to base64 encode a byte slice. -func encodeByteSlice(w writer, scratch []byte, v []byte) { - if v == nil { - return - } - - encodedLen := base64.StdEncoding.EncodedLen(len(v)) - if encodedLen <= len(scratch) { - // If the encoded bytes fit in e.scratch, avoid an extra - // allocation and use the cheaper Encoding.Encode. - dst := scratch[:encodedLen] - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else if encodedLen <= 1024 { - // The encoded bytes are short enough to allocate for, and - // Encoding.Encode is still cheaper. - dst := make([]byte, encodedLen) - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else { - // The encoded bytes are too long to cheaply allocate, and - // Encoding.Encode is no longer noticeably cheaper. - enc := base64.NewEncoder(base64.StdEncoding, w) - enc.Write(v) - enc.Close() - } -} - -// IsFlattened returns true if value is for flattened shape. -func (xv Value) IsFlattened() bool { - return xv.isFlattened -} - -// Close closes the value. -func (xv Value) Close() { - writeEndElement(xv.w, xv.startElement.End()) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go deleted file mode 100644 index dc4eebdffa72..000000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go +++ /dev/null @@ -1,154 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "strings" -) - -// NodeDecoder is a XML decoder wrapper that is responsible to decoding -// a single XML Node element and it's nested member elements. This wrapper decoder -// takes in the start element of the top level node being decoded. -type NodeDecoder struct { - Decoder *xml.Decoder - StartEl xml.StartElement -} - -// WrapNodeDecoder returns an initialized XMLNodeDecoder -func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder { - return NodeDecoder{ - Decoder: decoder, - StartEl: startEl, - } -} - -// Token on a Node Decoder returns a xml StartElement. It returns a boolean that indicates the -// a token is the node decoder's end node token; and an error which indicates any error -// that occurred while retrieving the start element -func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) { - for { - token, e := d.Decoder.Token() - if e != nil { - return t, done, e - } - - // check if we reach end of the node being decoded - if el, ok := token.(xml.EndElement); ok { - return t, el == d.StartEl.End(), err - } - - if t, ok := token.(xml.StartElement); ok { - return restoreAttrNamespaces(t), false, err - } - - // skip token if it is a comment or preamble or empty space value due to indentation - // or if it's a value and is not expected - } -} - -// restoreAttrNamespaces update XML attributes to restore the short namespaces found within -// the raw XML document. -func restoreAttrNamespaces(node xml.StartElement) xml.StartElement { - if len(node.Attr) == 0 { - return node - } - - // Generate a mapping of XML namespace values to their short names. - ns := map[string]string{} - for _, a := range node.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - break - } - } - - for i, a := range node.Attr { - if a.Name.Space == "xmlns" { - continue - } - // By default, xml.Decoder will fully resolve these namespaces. So if you had - // then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to - // continue to resolve as `bar` so we can easily identify it later on. - if v, ok := ns[node.Attr[i].Name.Space]; ok { - node.Attr[i].Name.Space = v - } - } - return node -} - -// GetElement looks for the given tag name at the current level, and returns the element if found, and -// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking -// the document. -func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) { - for { - token, done, err := d.Token() - if err != nil { - return t, err - } - if done { - return t, fmt.Errorf("%s node not found", name) - } - switch { - case strings.EqualFold(name, token.Name.Local): - return token, nil - default: - err = d.Decoder.Skip() - if err != nil { - return t, err - } - } - } -} - -// Value provides an abstraction to retrieve char data value within an xml element. -// The method will return an error if it encounters a nested xml element instead of char data. -// This method should only be used to retrieve simple type or blob shape values as []byte. -func (d NodeDecoder) Value() (c []byte, err error) { - t, e := d.Decoder.Token() - if e != nil { - return c, e - } - - endElement := d.StartEl.End() - - switch ev := t.(type) { - case xml.CharData: - c = ev.Copy() - case xml.EndElement: // end tag or self-closing - if ev == endElement { - return []byte{}, err - } - return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) - default: - return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) - } - - t, e = d.Decoder.Token() - if e != nil { - return c, e - } - - if ev, ok := t.(xml.EndElement); ok { - if ev == endElement { - return c, err - } - } - - return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t) -} - -// FetchRootElement takes in a decoder and returns the first start element within the xml body. -// This function is useful in fetching the start element of an XML response and ignore the -// comments and preamble -func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) { - for { - t, e := decoder.Token() - if e != nil { - return startElement, e - } - - if startElement, ok := t.(xml.StartElement); ok { - return startElement, err - } - } -} diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go deleted file mode 100644 index f778272be300..000000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ /dev/null @@ -1,23 +0,0 @@ -package transport - -import ( - "net/http" - "net/url" - - "github.com/aws/smithy-go" -) - -// Endpoint is the endpoint object returned by Endpoint resolution V2 -type Endpoint struct { - // The complete URL minimally specifying the scheme and host. - // May optionally specify the port and base path component. - URI url.URL - - // An optional set of headers to be sent using transport layer headers. - Headers http.Header - - // A grab-bag property map of endpoint attributes. The - // values present here are subject to change, or being add/removed at any - // time. - Properties smithy.Properties -} diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go deleted file mode 100644 index e24e190dca73..000000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package rulesfn provides endpoint rule functions for evaluating endpoint -// resolution rules. - -package rulesfn diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go deleted file mode 100644 index 5cf4a7b02d77..000000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go +++ /dev/null @@ -1,25 +0,0 @@ -package rulesfn - -// Substring returns the substring of the input provided. If the start or stop -// indexes are not valid for the input nil will be returned. If errors occur -// they will be added to the provided [ErrorCollector]. -func SubString(input string, start, stop int, reverse bool) *string { - if start < 0 || stop < 1 || start >= stop || len(input) < stop { - return nil - } - - for _, r := range input { - if r > 127 { - return nil - } - } - - if !reverse { - v := input[start:stop] - return &v - } - - rStart := len(input) - stop - rStop := len(input) - start - return SubString(input, rStart, rStop, false) -} diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go deleted file mode 100644 index 0c11541276ba..000000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go +++ /dev/null @@ -1,130 +0,0 @@ -package rulesfn - -import ( - "fmt" - "net" - "net/url" - "strings" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// IsValidHostLabel returns if the input is a single valid [RFC 1123] host -// label. If allowSubDomains is true, will allow validation to include nested -// host labels. Returns false if the input is not a valid host label. If errors -// occur they will be added to the provided [ErrorCollector]. -// -// [RFC 1123]: https://www.ietf.org/rfc/rfc1123.txt -func IsValidHostLabel(input string, allowSubDomains bool) bool { - var labels []string - if allowSubDomains { - labels = strings.Split(input, ".") - } else { - labels = []string{input} - } - - for _, label := range labels { - if !smithyhttp.ValidHostLabel(label) { - return false - } - } - - return true -} - -// ParseURL returns a [URL] if the provided string could be parsed. Returns nil -// if the string could not be parsed. Any parsing error will be added to the -// [ErrorCollector]. -// -// If the input URL string contains an IP6 address with a zone index. The -// returned [builtin.URL.Authority] value will contain the percent escaped (%) -// zone index separator. -func ParseURL(input string) *URL { - u, err := url.Parse(input) - if err != nil { - return nil - } - - if u.RawQuery != "" { - return nil - } - - if u.Scheme != "http" && u.Scheme != "https" { - return nil - } - - normalizedPath := u.Path - if !strings.HasPrefix(normalizedPath, "/") { - normalizedPath = "/" + normalizedPath - } - if !strings.HasSuffix(normalizedPath, "/") { - normalizedPath = normalizedPath + "/" - } - - // IP6 hosts may have zone indexes that need to be escaped to be valid in a - // URI. The Go URL parser will unescape the `%25` into `%`. This needs to - // be reverted since the returned URL will be used in string builders. - authority := strings.ReplaceAll(u.Host, "%", "%25") - - return &URL{ - Scheme: u.Scheme, - Authority: authority, - Path: u.Path, - NormalizedPath: normalizedPath, - IsIp: net.ParseIP(hostnameWithoutZone(u)) != nil, - } -} - -// URL provides the structure describing the parts of a parsed URL returned by -// [ParseURL]. -type URL struct { - Scheme string // https://www.rfc-editor.org/rfc/rfc3986#section-3.1 - Authority string // https://www.rfc-editor.org/rfc/rfc3986#section-3.2 - Path string // https://www.rfc-editor.org/rfc/rfc3986#section-3.3 - NormalizedPath string // https://www.rfc-editor.org/rfc/rfc3986#section-6.2.3 - IsIp bool -} - -// URIEncode returns an percent-encoded [RFC3986 section 2.1] version of the -// input string. -// -// [RFC3986 section 2.1]: https://www.rfc-editor.org/rfc/rfc3986#section-2.1 -func URIEncode(input string) string { - var output strings.Builder - for _, c := range []byte(input) { - if validPercentEncodedChar(c) { - output.WriteByte(c) - continue - } - - fmt.Fprintf(&output, "%%%X", c) - } - - return output.String() -} - -func validPercentEncodedChar(c byte) bool { - return (c >= 'a' && c <= 'z') || - (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9') || - c == '-' || c == '_' || c == '.' || c == '~' -} - -// hostname implements u.Hostname() but strips the ipv6 zone ID (if present) -// such that net.ParseIP can still recognize IPv6 addresses with zone IDs. -// -// FUTURE(10/2023): netip.ParseAddr handles this natively but we can't take -// that package as a dependency yet due to our min go version (1.15, netip -// starts in 1.18). When we align with go runtime deprecation policy in -// 10/2023, we can remove this. -func hostnameWithoutZone(u *url.URL) string { - full := u.Hostname() - - // this more or less mimics the internals of net/ (see unexported - // splitHostZone in that source) but throws the zone away because we don't - // need it - if i := strings.LastIndex(full, "%"); i > -1 { - return full[:i] - } - return full -} diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go deleted file mode 100644 index d6948d020623..000000000000 --- a/vendor/github.com/aws/smithy-go/errors.go +++ /dev/null @@ -1,137 +0,0 @@ -package smithy - -import "fmt" - -// APIError provides the generic API and protocol agnostic error type all SDK -// generated exception types will implement. -type APIError interface { - error - - // ErrorCode returns the error code for the API exception. - ErrorCode() string - // ErrorMessage returns the error message for the API exception. - ErrorMessage() string - // ErrorFault returns the fault for the API exception. - ErrorFault() ErrorFault -} - -// GenericAPIError provides a generic concrete API error type that SDKs can use -// to deserialize error responses into. Should be used for unmodeled or untyped -// errors. -type GenericAPIError struct { - Code string - Message string - Fault ErrorFault -} - -// ErrorCode returns the error code for the API exception. -func (e *GenericAPIError) ErrorCode() string { return e.Code } - -// ErrorMessage returns the error message for the API exception. -func (e *GenericAPIError) ErrorMessage() string { return e.Message } - -// ErrorFault returns the fault for the API exception. -func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault } - -func (e *GenericAPIError) Error() string { - return fmt.Sprintf("api error %s: %s", e.Code, e.Message) -} - -var _ APIError = (*GenericAPIError)(nil) - -// OperationError decorates an underlying error which occurred while invoking -// an operation with names of the operation and API. -type OperationError struct { - ServiceID string - OperationName string - Err error -} - -// Service returns the name of the API service the error occurred with. -func (e *OperationError) Service() string { return e.ServiceID } - -// Operation returns the name of the API operation the error occurred with. -func (e *OperationError) Operation() string { return e.OperationName } - -// Unwrap returns the nested error if any, or nil. -func (e *OperationError) Unwrap() error { return e.Err } - -func (e *OperationError) Error() string { - return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err) -} - -// DeserializationError provides a wrapper for an error that occurs during -// deserialization. -type DeserializationError struct { - Err error // original error - Snapshot []byte -} - -// Error returns a formatted error for DeserializationError -func (e *DeserializationError) Error() string { - const msg = "deserialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s, %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in DeserializationError -func (e *DeserializationError) Unwrap() error { return e.Err } - -// ErrorFault provides the type for a Smithy API error fault. -type ErrorFault int - -// ErrorFault enumeration values -const ( - FaultUnknown ErrorFault = iota - FaultServer - FaultClient -) - -func (f ErrorFault) String() string { - switch f { - case FaultServer: - return "server" - case FaultClient: - return "client" - default: - return "unknown" - } -} - -// SerializationError represents an error that occurred while attempting to serialize a request -type SerializationError struct { - Err error // original error -} - -// Error returns a formatted error for SerializationError -func (e *SerializationError) Error() string { - const msg = "serialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s: %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in SerializationError -func (e *SerializationError) Unwrap() error { return e.Err } - -// CanceledError is the error that will be returned by an API request that was -// canceled. API operations given a Context may return this error when -// canceled. -type CanceledError struct { - Err error -} - -// CanceledError returns true to satisfy interfaces checking for canceled errors. -func (*CanceledError) CanceledError() bool { return true } - -// Unwrap returns the underlying error, if there was one. -func (e *CanceledError) Unwrap() error { - return e.Err -} - -func (e *CanceledError) Error() string { - return fmt.Sprintf("canceled, %v", e.Err) -} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go deleted file mode 100644 index cbbaabee9ef8..000000000000 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package smithy - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.5" diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE deleted file mode 100644 index fe6a62006a52..000000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go deleted file mode 100644 index 9c9d02b94b91..000000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package singleflight provides a duplicate function call suppression -// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight -// package. The package is forked, because the package a part of the unstable -// and unversioned golang.org/x/sync module. -// -// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight - -package singleflight diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go deleted file mode 100644 index e8a1b17d5640..000000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package singleflight - -import ( - "bytes" - "errors" - "fmt" - "runtime" - "runtime/debug" - "sync" -) - -// errGoexit indicates the runtime.Goexit was called in -// the user given function. -var errGoexit = errors.New("runtime.Goexit was called") - -// A panicError is an arbitrary value recovered from a panic -// with the stack trace during the execution of given function. -type panicError struct { - value interface{} - stack []byte -} - -// Error implements error interface. -func (p *panicError) Error() string { - return fmt.Sprintf("%v\n\n%s", p.value, p.stack) -} - -func newPanicError(v interface{}) error { - stack := debug.Stack() - - // The first line of the stack trace is of the form "goroutine N [status]:" - // but by the time the panic reaches Do the goroutine may no longer exist - // and its status will have changed. Trim out the misleading line. - if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { - stack = stack[line+1:] - } - return &panicError{value: v, stack: stack} -} - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - - if e, ok := c.err.(*panicError); ok { - panic(e) - } else if c.err == errGoexit { - runtime.Goexit() - } - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -// -// The returned channel will not be closed. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - normalReturn := false - recovered := false - - // use double-defer to distinguish panic from runtime.Goexit, - // more details see https://golang.org/cl/134395 - defer func() { - // the given function invoked runtime.Goexit - if !normalReturn && !recovered { - c.err = errGoexit - } - - c.wg.Done() - g.mu.Lock() - defer g.mu.Unlock() - if !c.forgotten { - delete(g.m, key) - } - - if e, ok := c.err.(*panicError); ok { - // In order to prevent the waiting channels from being blocked forever, - // needs to ensure that this panic cannot be recovered. - if len(c.chans) > 0 { - go panic(e) - select {} // Keep this goroutine around so that it will appear in the crash dump. - } else { - panic(e) - } - } else if c.err == errGoexit { - // Already in the process of goexit, no need to call again - } else { - // Normal return - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - } - }() - - func() { - defer func() { - if !normalReturn { - // Ideally, we would wait to take a stack trace until we've determined - // whether this is a panic or a runtime.Goexit. - // - // Unfortunately, the only way we can distinguish the two is to see - // whether the recover stopped the goroutine from terminating, and by - // the time we know that, the part of the stack trace relevant to the - // panic has been discarded. - if r := recover(); r != nil { - c.err = newPanicError(r) - } - } - }() - - c.val, c.err = fn() - normalReturn = true - }() - - if !normalReturn { - recovered = true - } -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go deleted file mode 100644 index f8417c15b85b..000000000000 --- a/vendor/github.com/aws/smithy-go/io/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package io - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go deleted file mode 100644 index a6a33eaf5672..000000000000 --- a/vendor/github.com/aws/smithy-go/io/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package io provides utilities for Smithy generated API clients. -package io diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go deleted file mode 100644 index 07063f2960d8..000000000000 --- a/vendor/github.com/aws/smithy-go/io/reader.go +++ /dev/null @@ -1,16 +0,0 @@ -package io - -import ( - "io" -) - -// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method -// that does nothing. -type ReadSeekNopCloser struct { - io.ReadSeeker -} - -// Close does nothing. -func (ReadSeekNopCloser) Close() error { - return nil -} diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go deleted file mode 100644 index 06b476add8a2..000000000000 --- a/vendor/github.com/aws/smithy-go/io/ringbuffer.go +++ /dev/null @@ -1,94 +0,0 @@ -package io - -import ( - "bytes" - "io" -) - -// RingBuffer struct satisfies io.ReadWrite interface. -// -// ReadBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a -// revolving window. -type RingBuffer struct { - slice []byte - start int - end int - size int -} - -// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer. -func NewRingBuffer(slice []byte) *RingBuffer { - ringBuf := RingBuffer{ - slice: slice, - } - return &ringBuf -} - -// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error. -func (r *RingBuffer) Write(p []byte) (int, error) { - for _, b := range p { - // check if end points to invalid index, we need to circle back - if r.end == len(r.slice) { - r.end = 0 - } - // check if start points to invalid index, we need to circle back - if r.start == len(r.slice) { - r.start = 0 - } - // if ring buffer is filled, increment the start index - if r.size == len(r.slice) { - r.size-- - r.start++ - } - - r.slice[r.end] = b - r.end++ - r.size++ - } - return len(p), nil -} - -// Read copies the data on the ring buffer into the byte slice provided to the method. -// Returns the read count along with any error encountered while reading. -func (r *RingBuffer) Read(p []byte) (int, error) { - // readCount keeps track of the number of bytes read - var readCount int - for j := 0; j < len(p); j++ { - // if ring buffer is empty or completely read - // return EOF error. - if r.size == 0 { - return readCount, io.EOF - } - - if r.start == len(r.slice) { - r.start = 0 - } - - p[j] = r.slice[r.start] - readCount++ - // increment the start pointer for ring buffer - r.start++ - // decrement the size of ring buffer - r.size-- - } - return readCount, nil -} - -// Len returns the number of unread bytes in the buffer. -func (r *RingBuffer) Len() int { - return r.size -} - -// Bytes returns a copy of the RingBuffer's bytes. -func (r RingBuffer) Bytes() []byte { - var b bytes.Buffer - io.Copy(&b, &r) - return b.Bytes() -} - -// Reset resets the ring buffer. -func (r *RingBuffer) Reset() { - *r = RingBuffer{ - slice: r.slice, - } -} diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh deleted file mode 100644 index 800bf3769542..000000000000 --- a/vendor/github.com/aws/smithy-go/local-mod-replace.sh +++ /dev/null @@ -1,39 +0,0 @@ -#1/usr/bin/env bash - -PROJECT_DIR="" -SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd) - -usage() { - echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2 - exit 1 -} - -while getopts "hs:d:" options; do - case "${options}" in - s) - SMITHY_SOURCE_DIR=${OPTARG} - if [ "$SMITHY_SOURCE_DIR" == "" ]; then - echo "path to smithy-go source directory is required" || exit - usage - fi - ;; - d) - PROJECT_DIR=${OPTARG} - ;; - h) - usage - ;; - *) - usage - ;; - esac -done - -if [ "$PROJECT_DIR" != "" ]; then - cd $PROJECT_DIR || exit -fi - -go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do - repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}} - echo -replace $x=$repPath -done | xargs go mod edit diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go deleted file mode 100644 index 2071924bd306..000000000000 --- a/vendor/github.com/aws/smithy-go/logging/logger.go +++ /dev/null @@ -1,82 +0,0 @@ -package logging - -import ( - "context" - "io" - "log" -) - -// Classification is the type of the log entry's classification name. -type Classification string - -// Set of standard classifications that can be used by clients and middleware -const ( - Warn Classification = "WARN" - Debug Classification = "DEBUG" -) - -// Logger is an interface for logging entries at certain classifications. -type Logger interface { - // Logf is expected to support the standard fmt package "verbs". - Logf(classification Classification, format string, v ...interface{}) -} - -// LoggerFunc is a wrapper around a function to satisfy the Logger interface. -type LoggerFunc func(classification Classification, format string, v ...interface{}) - -// Logf delegates the logging request to the wrapped function. -func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) { - f(classification, format, v...) -} - -// ContextLogger is an optional interface a Logger implementation may expose that provides -// the ability to create context aware log entries. -type ContextLogger interface { - WithContext(context.Context) Logger -} - -// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting -// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will -// be returned to the caller. -func WithContext(ctx context.Context, logger Logger) Logger { - if logger == nil { - return Nop{} - } - - cl, ok := logger.(ContextLogger) - if !ok { - return logger - } - - return cl.WithContext(ctx) -} - -// Nop is a Logger implementation that simply does not perform any logging. -type Nop struct{} - -// Logf simply returns without performing any action -func (n Nop) Logf(Classification, string, ...interface{}) { - return -} - -// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to it's -// Printf method. -type StandardLogger struct { - Logger *log.Logger -} - -// Logf logs the given classification and message to the underlying logger. -func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) { - if len(classification) != 0 { - format = string(classification) + " " + format - } - - s.Logger.Printf(format, v...) -} - -// NewStandardLogger returns a new StandardLogger -func NewStandardLogger(writer io.Writer) *StandardLogger { - return &StandardLogger{ - Logger: log.New(writer, "SDK ", log.LstdFlags), - } -} diff --git a/vendor/github.com/aws/smithy-go/metrics/metrics.go b/vendor/github.com/aws/smithy-go/metrics/metrics.go deleted file mode 100644 index c009d9f278b6..000000000000 --- a/vendor/github.com/aws/smithy-go/metrics/metrics.go +++ /dev/null @@ -1,136 +0,0 @@ -// Package metrics defines the metrics APIs used by Smithy clients. -package metrics - -import ( - "context" - - "github.com/aws/smithy-go" -) - -// MeterProvider is the entry point for creating a Meter. -type MeterProvider interface { - Meter(scope string, opts ...MeterOption) Meter -} - -// MeterOption applies configuration to a Meter. -type MeterOption func(o *MeterOptions) - -// MeterOptions represents configuration for a Meter. -type MeterOptions struct { - Properties smithy.Properties -} - -// Meter is the entry point for creation of measurement instruments. -type Meter interface { - // integer/synchronous - Int64Counter(name string, opts ...InstrumentOption) (Int64Counter, error) - Int64UpDownCounter(name string, opts ...InstrumentOption) (Int64UpDownCounter, error) - Int64Gauge(name string, opts ...InstrumentOption) (Int64Gauge, error) - Int64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) - - // integer/asynchronous - Int64AsyncCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Int64AsyncUpDownCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Int64AsyncGauge(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - - // floating-point/synchronous - Float64Counter(name string, opts ...InstrumentOption) (Float64Counter, error) - Float64UpDownCounter(name string, opts ...InstrumentOption) (Float64UpDownCounter, error) - Float64Gauge(name string, opts ...InstrumentOption) (Float64Gauge, error) - Float64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) - - // floating-point/asynchronous - Float64AsyncCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Float64AsyncUpDownCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Float64AsyncGauge(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) -} - -// InstrumentOption applies configuration to an instrument. -type InstrumentOption func(o *InstrumentOptions) - -// InstrumentOptions represents configuration for an instrument. -type InstrumentOptions struct { - UnitLabel string - Description string -} - -// Int64Counter measures a monotonically increasing int64 value. -type Int64Counter interface { - Add(context.Context, int64, ...RecordMetricOption) -} - -// Int64UpDownCounter measures a fluctuating int64 value. -type Int64UpDownCounter interface { - Add(context.Context, int64, ...RecordMetricOption) -} - -// Int64Gauge samples a discrete int64 value. -type Int64Gauge interface { - Sample(context.Context, int64, ...RecordMetricOption) -} - -// Int64Histogram records multiple data points for an int64 value. -type Int64Histogram interface { - Record(context.Context, int64, ...RecordMetricOption) -} - -// Float64Counter measures a monotonically increasing float64 value. -type Float64Counter interface { - Add(context.Context, float64, ...RecordMetricOption) -} - -// Float64UpDownCounter measures a fluctuating float64 value. -type Float64UpDownCounter interface { - Add(context.Context, float64, ...RecordMetricOption) -} - -// Float64Gauge samples a discrete float64 value. -type Float64Gauge interface { - Sample(context.Context, float64, ...RecordMetricOption) -} - -// Float64Histogram records multiple data points for an float64 value. -type Float64Histogram interface { - Record(context.Context, float64, ...RecordMetricOption) -} - -// AsyncInstrument is the universal handle returned for creation of all async -// instruments. -// -// Callers use the Stop() API to unregister the callback passed at instrument -// creation. -type AsyncInstrument interface { - Stop() -} - -// Int64Callback describes a function invoked when an async int64 instrument is -// read. -type Int64Callback func(context.Context, Int64Observer) - -// Int64Observer is the interface passed to async int64 instruments. -// -// Callers use the Observe() API of this interface to report metrics to the -// underlying collector. -type Int64Observer interface { - Observe(context.Context, int64, ...RecordMetricOption) -} - -// Float64Callback describes a function invoked when an async float64 -// instrument is read. -type Float64Callback func(context.Context, Float64Observer) - -// Float64Observer is the interface passed to async int64 instruments. -// -// Callers use the Observe() API of this interface to report metrics to the -// underlying collector. -type Float64Observer interface { - Observe(context.Context, float64, ...RecordMetricOption) -} - -// RecordMetricOption applies configuration to a recorded metric. -type RecordMetricOption func(o *RecordMetricOptions) - -// RecordMetricOptions represents configuration for a recorded metric. -type RecordMetricOptions struct { - Properties smithy.Properties -} diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go deleted file mode 100644 index fb374e1fb850..000000000000 --- a/vendor/github.com/aws/smithy-go/metrics/nop.go +++ /dev/null @@ -1,67 +0,0 @@ -package metrics - -import "context" - -// NopMeterProvider is a no-op metrics implementation. -type NopMeterProvider struct{} - -var _ MeterProvider = (*NopMeterProvider)(nil) - -// Meter returns a meter which creates no-op instruments. -func (NopMeterProvider) Meter(string, ...MeterOption) Meter { - return nopMeter{} -} - -type nopMeter struct{} - -var _ Meter = (*nopMeter)(nil) - -func (nopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil -} - -type nopInstrument[N any] struct{} - -func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {} -func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {} -func (nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {} -func (nopInstrument[_]) Stop() {} diff --git a/vendor/github.com/aws/smithy-go/middleware/context.go b/vendor/github.com/aws/smithy-go/middleware/context.go deleted file mode 100644 index f51aa4f04fc4..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/context.go +++ /dev/null @@ -1,41 +0,0 @@ -package middleware - -import "context" - -type ( - serviceIDKey struct{} - operationNameKey struct{} -) - -// WithServiceID adds a service ID to the context, scoped to middleware stack -// values. -// -// This API is called in the client runtime when bootstrapping an operation and -// should not typically be used directly. -func WithServiceID(parent context.Context, id string) context.Context { - return WithStackValue(parent, serviceIDKey{}, id) -} - -// GetServiceID retrieves the service ID from the context. This is typically -// the service shape's name from its Smithy model. Service clients for specific -// systems (e.g. AWS SDK) may use an alternate designated value. -func GetServiceID(ctx context.Context) string { - id, _ := GetStackValue(ctx, serviceIDKey{}).(string) - return id -} - -// WithOperationName adds the operation name to the context, scoped to -// middleware stack values. -// -// This API is called in the client runtime when bootstrapping an operation and -// should not typically be used directly. -func WithOperationName(parent context.Context, id string) context.Context { - return WithStackValue(parent, operationNameKey{}, id) -} - -// GetOperationName retrieves the operation name from the context. This is -// typically the operation shape's name from its Smithy model. -func GetOperationName(ctx context.Context) string { - name, _ := GetStackValue(ctx, operationNameKey{}).(string) - return name -} diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go deleted file mode 100644 index 9858928a7f83..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/doc.go +++ /dev/null @@ -1,67 +0,0 @@ -// Package middleware provides transport agnostic middleware for decorating SDK -// handlers. -// -// The Smithy middleware stack provides ordered behavior to be invoked on an -// underlying handler. The stack is separated into steps that are invoked in a -// static order. A step is a collection of middleware that are injected into a -// ordered list defined by the user. The user may add, insert, swap, and remove a -// step's middleware. When the stack is invoked the step middleware become static, -// and their order cannot be modified. -// -// A stack and its step middleware are **not** safe to modify concurrently. -// -// A stack will use the ordered list of middleware to decorate a underlying -// handler. A handler could be something like an HTTP Client that round trips an -// API operation over HTTP. -// -// Smithy Middleware Stack -// -// A Stack is a collection of middleware that wrap a handler. The stack can be -// broken down into discreet steps. Each step may contain zero or more middleware -// specific to that stack's step. -// -// A Stack Step is a predefined set of middleware that are invoked in a static -// order by the Stack. These steps represent fixed points in the middleware stack -// for organizing specific behavior, such as serialize and build. A Stack Step is -// composed of zero or more middleware that are specific to that step. A step may -// define its own set of input/output parameters the generic input/output -// parameters are cast from. A step calls its middleware recursively, before -// calling the next step in the stack returning the result or error of the step -// middleware decorating the underlying handler. -// -// * Initialize: Prepares the input, and sets any default parameters as needed, -// (e.g. idempotency token, and presigned URLs). -// -// * Serialize: Serializes the prepared input into a data structure that can be -// consumed by the target transport's message, (e.g. REST-JSON serialization). -// -// * Build: Adds additional metadata to the serialized transport message, (e.g. -// HTTP's Content-Length header, or body checksum). Decorations and -// modifications to the message should be copied to all message attempts. -// -// * Finalize: Performs final preparations needed before sending the message. The -// message should already be complete by this stage, and is only alternated to -// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request -// signing). -// -// * Deserialize: Reacts to the handler's response returned by the recipient of -// the request message. Deserializes the response into a structured type or -// error above stacks can react to. -// -// Adding Middleware to a Stack Step -// -// Middleware can be added to a step front or back, or relative, by name, to an -// existing middleware in that stack. If a middleware does not have a name, a -// unique name will be generated at the middleware and be added to the step. -// -// // Create middleware stack -// stack := middleware.NewStack() -// -// // Add middleware to stack steps -// stack.Initialize.Add(paramValidationMiddleware, middleware.After) -// stack.Serialize.Add(marshalOperationFoo, middleware.After) -// stack.Deserialize.Add(unmarshalOperationFoo, middleware.After) -// -// // Invoke middleware on handler. -// resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler) -package middleware diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go deleted file mode 100644 index c2f0dbb6bda9..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/logging.go +++ /dev/null @@ -1,46 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/smithy-go/logging" -) - -// loggerKey is the context value key for which the logger is associated with. -type loggerKey struct{} - -// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger -// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed -// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is. -func GetLogger(ctx context.Context) logging.Logger { - logger, ok := ctx.Value(loggerKey{}).(logging.Logger) - if !ok || logger == nil { - return logging.Nop{} - } - - return logging.WithContext(ctx, logger) -} - -// SetLogger sets the provided logger value on the provided ctx. -func SetLogger(ctx context.Context, logger logging.Logger) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) -} - -type setLogger struct { - Logger logging.Logger -} - -// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context. -func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error { - return stack.Initialize.Add(&setLogger{Logger: logger}, After) -} - -func (a *setLogger) ID() string { - return "SetLogger" -} - -func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, -) { - return next.HandleInitialize(SetLogger(ctx, a.Logger), in) -} diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go deleted file mode 100644 index 7bb7dbcf5a05..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/metadata.go +++ /dev/null @@ -1,65 +0,0 @@ -package middleware - -// MetadataReader provides an interface for reading metadata from the -// underlying metadata container. -type MetadataReader interface { - Get(key interface{}) interface{} -} - -// Metadata provides storing and reading metadata values. Keys may be any -// comparable value type. Get and set will panic if key is not a comparable -// value type. -// -// Metadata uses lazy initialization, and Set method must be called as an -// addressable value, or pointer. Not doing so may cause key/value pair to not -// be set. -type Metadata struct { - values map[interface{}]interface{} -} - -// Get attempts to retrieve the value the key points to. Returns nil if the -// key was not found. -// -// Panics if key type is not comparable. -func (m Metadata) Get(key interface{}) interface{} { - return m.values[key] -} - -// Clone creates a shallow copy of Metadata entries, returning a new Metadata -// value with the original entries copied into it. -func (m Metadata) Clone() Metadata { - vs := make(map[interface{}]interface{}, len(m.values)) - for k, v := range m.values { - vs[k] = v - } - - return Metadata{ - values: vs, - } -} - -// Set stores the value pointed to by the key. If a value already exists at -// that key it will be replaced with the new value. -// -// Set method must be called as an addressable value, or pointer. If Set is not -// called as an addressable value or pointer, the key value pair being set may -// be lost. -// -// Panics if the key type is not comparable. -func (m *Metadata) Set(key, value interface{}) { - if m.values == nil { - m.values = map[interface{}]interface{}{} - } - m.values[key] = value -} - -// Has returns whether the key exists in the metadata. -// -// Panics if the key type is not comparable. -func (m Metadata) Has(key interface{}) bool { - if m.values == nil { - return false - } - _, ok := m.values[key] - return ok -} diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go deleted file mode 100644 index 803b7c751840..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/middleware.go +++ /dev/null @@ -1,71 +0,0 @@ -package middleware - -import ( - "context" -) - -// Handler provides the interface for performing the logic to obtain an output, -// or error for the given input. -type Handler interface { - // Handle performs logic to obtain an output for the given input. Handler - // should be decorated with middleware to perform input specific behavior. - Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, - ) -} - -// HandlerFunc provides a wrapper around a function pointer to be used as a -// middleware handler. -type HandlerFunc func(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) - -// Handle invokes the underlying function, returning the result. -func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) { - return fn(ctx, input) -} - -// Middleware provides the interface to call handlers in a chain. -type Middleware interface { - // ID provides a unique identifier for the middleware. - ID() string - - // Performs the middleware's handling of the input, returning the output, - // or error. The middleware can invoke the next Handler if handling should - // continue. - HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( - output interface{}, metadata Metadata, err error, - ) -} - -// decoratedHandler wraps a middleware in order to to call the next handler in -// the chain. -type decoratedHandler struct { - // The next handler to be called. - Next Handler - - // The current middleware decorating the handler. - With Middleware -} - -// Handle implements the Handler interface to handle a operation invocation. -func (m decoratedHandler) Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) { - return m.With.HandleMiddleware(ctx, input, m.Next) -} - -// DecorateHandler decorates a handler with a middleware. Wrapping the handler -// with the middleware. -func DecorateHandler(h Handler, with ...Middleware) Handler { - for i := len(with) - 1; i >= 0; i-- { - h = decoratedHandler{ - Next: h, - With: with[i], - } - } - - return h -} diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go deleted file mode 100644 index 4b195308c599..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go +++ /dev/null @@ -1,268 +0,0 @@ -package middleware - -import "fmt" - -// RelativePosition provides specifying the relative position of a middleware -// in an ordered group. -type RelativePosition int - -// Relative position for middleware in steps. -const ( - After RelativePosition = iota - Before -) - -type ider interface { - ID() string -} - -// orderedIDs provides an ordered collection of items with relative ordering -// by name. -type orderedIDs struct { - order *relativeOrder - items map[string]ider -} - -const baseOrderedItems = 5 - -func newOrderedIDs() *orderedIDs { - return &orderedIDs{ - order: newRelativeOrder(), - items: make(map[string]ider, baseOrderedItems), - } -} - -// Add injects the item to the relative position of the item group. Returns an -// error if the item already exists. -func (g *orderedIDs) Add(m ider, pos RelativePosition) error { - id := m.ID() - if len(id) == 0 { - return fmt.Errorf("empty ID, ID must not be empty") - } - - if err := g.order.Add(pos, id); err != nil { - return err - } - - g.items[id] = m - return nil -} - -// Insert injects the item relative to an existing item id. Returns an error if -// the original item does not exist, or the item being added already exists. -func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { - if len(m.ID()) == 0 { - return fmt.Errorf("insert ID must not be empty") - } - if len(relativeTo) == 0 { - return fmt.Errorf("relative to ID must not be empty") - } - - if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { - return err - } - - g.items[m.ID()] = m - return nil -} - -// Get returns the ider identified by id. If ider is not present, returns false. -func (g *orderedIDs) Get(id string) (ider, bool) { - v, ok := g.items[id] - return v, ok -} - -// Swap removes the item by id, replacing it with the new item. Returns an error -// if the original item doesn't exist. -func (g *orderedIDs) Swap(id string, m ider) (ider, error) { - if len(id) == 0 { - return nil, fmt.Errorf("swap from ID must not be empty") - } - - iderID := m.ID() - if len(iderID) == 0 { - return nil, fmt.Errorf("swap to ID must not be empty") - } - - if err := g.order.Swap(id, iderID); err != nil { - return nil, err - } - - removed := g.items[id] - - delete(g.items, id) - g.items[iderID] = m - - return removed, nil -} - -// Remove removes the item by id. Returns an error if the item -// doesn't exist. -func (g *orderedIDs) Remove(id string) (ider, error) { - if len(id) == 0 { - return nil, fmt.Errorf("remove ID must not be empty") - } - - if err := g.order.Remove(id); err != nil { - return nil, err - } - - removed := g.items[id] - delete(g.items, id) - return removed, nil -} - -func (g *orderedIDs) List() []string { - items := g.order.List() - order := make([]string, len(items)) - copy(order, items) - return order -} - -// Clear removes all entries and slots. -func (g *orderedIDs) Clear() { - g.order.Clear() - g.items = map[string]ider{} -} - -// GetOrder returns the item in the order it should be invoked in. -func (g *orderedIDs) GetOrder() []interface{} { - order := g.order.List() - ordered := make([]interface{}, len(order)) - for i := 0; i < len(order); i++ { - ordered[i] = g.items[order[i]] - } - - return ordered -} - -// relativeOrder provides ordering of item -type relativeOrder struct { - order []string -} - -func newRelativeOrder() *relativeOrder { - return &relativeOrder{ - order: make([]string, 0, baseOrderedItems), - } -} - -// Add inserts an item into the order relative to the position provided. -func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { - if len(ids) == 0 { - return nil - } - - for _, id := range ids { - if _, ok := s.has(id); ok { - return fmt.Errorf("already exists, %v", id) - } - } - - switch pos { - case Before: - return s.insert(0, Before, ids...) - - case After: - s.order = append(s.order, ids...) - - default: - return fmt.Errorf("invalid position, %v", int(pos)) - } - - return nil -} - -// Insert injects an item before or after the relative item. Returns -// an error if the relative item does not exist. -func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { - if len(ids) == 0 { - return nil - } - - for _, id := range ids { - if _, ok := s.has(id); ok { - return fmt.Errorf("already exists, %v", id) - } - } - - i, ok := s.has(relativeTo) - if !ok { - return fmt.Errorf("not found, %v", relativeTo) - } - - return s.insert(i, pos, ids...) -} - -// Swap will replace the item id with the to item. Returns an -// error if the original item id does not exist. Allows swapping out an -// item for another item with the same id. -func (s *relativeOrder) Swap(id, to string) error { - i, ok := s.has(id) - if !ok { - return fmt.Errorf("not found, %v", id) - } - - if _, ok = s.has(to); ok && id != to { - return fmt.Errorf("already exists, %v", to) - } - - s.order[i] = to - return nil -} - -func (s *relativeOrder) Remove(id string) error { - i, ok := s.has(id) - if !ok { - return fmt.Errorf("not found, %v", id) - } - - s.order = append(s.order[:i], s.order[i+1:]...) - return nil -} - -func (s *relativeOrder) List() []string { - return s.order -} - -func (s *relativeOrder) Clear() { - s.order = s.order[0:0] -} - -func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { - switch pos { - case Before: - n := len(ids) - var src []string - if n <= cap(s.order)-len(s.order) { - s.order = s.order[:len(s.order)+n] - src = s.order - } else { - src = s.order - s.order = make([]string, len(s.order)+n) - copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half - } - copy(s.order[i+n:], src[i:]) - copy(s.order[i:], ids) - case After: - if i == len(s.order)-1 || len(s.order) == 0 { - s.order = append(s.order, ids...) - } else { - s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...) - } - - default: - return fmt.Errorf("invalid position, %v", int(pos)) - } - - return nil -} - -func (s *relativeOrder) has(id string) (i int, found bool) { - for i := 0; i < len(s.order); i++ { - if s.order[i] == id { - return i, true - } - } - return 0, false -} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go deleted file mode 100644 index 45ccb5b93c9f..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/stack.go +++ /dev/null @@ -1,209 +0,0 @@ -package middleware - -import ( - "context" - "io" - "strings" -) - -// Stack provides protocol and transport agnostic set of middleware split into -// distinct steps. Steps have specific transitions between them, that are -// managed by the individual step. -// -// Steps are composed as middleware around the underlying handler in the -// following order: -// -// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler -// -// Any middleware within the chain may choose to stop and return an error or -// response. Since the middleware decorate the handler like a call stack, each -// middleware will receive the result of the next middleware in the chain. -// Middleware that does not need to react to an input, or result must forward -// along the input down the chain, or return the result back up the chain. -// -// Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler -type Stack struct { - // Initialize prepares the input, and sets any default parameters as - // needed, (e.g. idempotency token, and presigned URLs). - // - // Takes Input Parameters, and returns result or error. - // - // Receives result or error from Serialize step. - Initialize *InitializeStep - - // Serialize serializes the prepared input into a data structure that can be consumed - // by the target transport's message, (e.g. REST-JSON serialization) - // - // Converts Input Parameters into a Request, and returns the result or error. - // - // Receives result or error from Build step. - Serialize *SerializeStep - - // Build adds additional metadata to the serialized transport message - // (e.g. HTTP's Content-Length header, or body checksum). Decorations and - // modifications to the message should be copied to all message attempts. - // - // Takes Request, and returns result or error. - // - // Receives result or error from Finalize step. - Build *BuildStep - - // Finalize performs final preparations needed before sending the message. The - // message should already be complete by this stage, and is only alternated - // to meet the expectations of the recipient (e.g. Retry and AWS SigV4 - // request signing) - // - // Takes Request, and returns result or error. - // - // Receives result or error from Deserialize step. - Finalize *FinalizeStep - - // Deserialize reacts to the handler's response returned by the recipient of the request - // message. Deserializes the response into a structured type or error above - // stacks can react to. - // - // Should only forward Request to underlying handler. - // - // Takes Request, and returns result or error. - // - // Receives raw response, or error from underlying handler. - Deserialize *DeserializeStep - - id string -} - -// NewStack returns an initialize empty stack. -func NewStack(id string, newRequestFn func() interface{}) *Stack { - return &Stack{ - id: id, - Initialize: NewInitializeStep(), - Serialize: NewSerializeStep(newRequestFn), - Build: NewBuildStep(), - Finalize: NewFinalizeStep(), - Deserialize: NewDeserializeStep(), - } -} - -// ID returns the unique ID for the stack as a middleware. -func (s *Stack) ID() string { return s.id } - -// HandleMiddleware invokes the middleware stack decorating the next handler. -// Each step of stack will be invoked in order before calling the next step. -// With the next handler call last. -// -// The input value must be the input parameters of the operation being -// performed. -// -// Will return the result of the operation, or error. -func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( - output interface{}, metadata Metadata, err error, -) { - h := DecorateHandler(next, - s.Initialize, - s.Serialize, - s.Build, - s.Finalize, - s.Deserialize, - ) - - return h.Handle(ctx, input) -} - -// List returns a list of all middleware in the stack by step. -func (s *Stack) List() []string { - var l []string - l = append(l, s.id) - - l = append(l, s.Initialize.ID()) - l = append(l, s.Initialize.List()...) - - l = append(l, s.Serialize.ID()) - l = append(l, s.Serialize.List()...) - - l = append(l, s.Build.ID()) - l = append(l, s.Build.List()...) - - l = append(l, s.Finalize.ID()) - l = append(l, s.Finalize.List()...) - - l = append(l, s.Deserialize.ID()) - l = append(l, s.Deserialize.List()...) - - return l -} - -func (s *Stack) String() string { - var b strings.Builder - - w := &indentWriter{w: &b} - - w.WriteLine(s.id) - w.Push() - - writeStepItems(w, s.Initialize) - writeStepItems(w, s.Serialize) - writeStepItems(w, s.Build) - writeStepItems(w, s.Finalize) - writeStepItems(w, s.Deserialize) - - return b.String() -} - -type stackStepper interface { - ID() string - List() []string -} - -func writeStepItems(w *indentWriter, s stackStepper) { - type lister interface { - List() []string - } - - w.WriteLine(s.ID()) - w.Push() - - defer w.Pop() - - // ignore stack to prevent circular iterations - if _, ok := s.(*Stack); ok { - return - } - - for _, id := range s.List() { - w.WriteLine(id) - } -} - -type stringWriter interface { - io.Writer - WriteString(string) (int, error) - WriteRune(rune) (int, error) -} - -type indentWriter struct { - w stringWriter - depth int -} - -const indentDepth = "\t\t\t\t\t\t\t\t\t\t" - -func (w *indentWriter) Push() { - w.depth++ -} - -func (w *indentWriter) Pop() { - w.depth-- - if w.depth < 0 { - w.depth = 0 - } -} - -func (w *indentWriter) WriteLine(v string) { - w.w.WriteString(indentDepth[:w.depth]) - - v = strings.ReplaceAll(v, "\n", "\\n") - v = strings.ReplaceAll(v, "\r", "\\r") - - w.w.WriteString(v) - w.w.WriteRune('\n') -} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go deleted file mode 100644 index ef96009ba182..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/stack_values.go +++ /dev/null @@ -1,100 +0,0 @@ -package middleware - -import ( - "context" - "reflect" - "strings" -) - -// WithStackValue adds a key value pair to the context that is intended to be -// scoped to a stack. Use ClearStackValues to get a new context with all stack -// values cleared. -func WithStackValue(ctx context.Context, key, value interface{}) context.Context { - md, _ := ctx.Value(stackValuesKey{}).(*stackValues) - - md = withStackValue(md, key, value) - return context.WithValue(ctx, stackValuesKey{}, md) -} - -// ClearStackValues returns a context without any stack values. -func ClearStackValues(ctx context.Context) context.Context { - return context.WithValue(ctx, stackValuesKey{}, nil) -} - -// GetStackValues returns the value pointed to by the key within the stack -// values, if it is present. -func GetStackValue(ctx context.Context, key interface{}) interface{} { - md, _ := ctx.Value(stackValuesKey{}).(*stackValues) - if md == nil { - return nil - } - - return md.Value(key) -} - -type stackValuesKey struct{} - -type stackValues struct { - key interface{} - value interface{} - parent *stackValues -} - -func withStackValue(parent *stackValues, key, value interface{}) *stackValues { - if key == nil { - panic("nil key") - } - if !reflect.TypeOf(key).Comparable() { - panic("key is not comparable") - } - return &stackValues{key: key, value: value, parent: parent} -} - -func (m *stackValues) Value(key interface{}) interface{} { - if key == m.key { - return m.value - } - - if m.parent == nil { - return nil - } - - return m.parent.Value(key) -} - -func (c *stackValues) String() string { - var str strings.Builder - - cc := c - for cc == nil { - str.WriteString("(" + - reflect.TypeOf(c.key).String() + - ": " + - stringify(cc.value) + - ")") - if cc.parent != nil { - str.WriteString(" -> ") - } - cc = cc.parent - } - str.WriteRune('}') - - return str.String() -} - -type stringer interface { - String() string -} - -// stringify tries a bit to stringify v, without using fmt, since we don't -// want context depending on the unicode tables. This is only used by -// *valueCtx.String(). -func stringify(v interface{}) string { - switch s := v.(type) { - case stringer: - return s.String() - case string: - return s - } - return "" -} diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go deleted file mode 100644 index 7e1d94caeef9..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_build.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import ( - "context" -) - -// BuildInput provides the input parameters for the BuildMiddleware to consume. -// BuildMiddleware may modify the Request value before forwarding the input -// along to the next BuildHandler. -type BuildInput struct { - Request interface{} -} - -// BuildOutput provides the result returned by the next BuildHandler. -type BuildOutput struct { - Result interface{} -} - -// BuildHandler provides the interface for the next handler the -// BuildMiddleware will call in the middleware chain. -type BuildHandler interface { - HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, - ) -} - -// BuildMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next BuildHandler for further -// processing. -type BuildMiddleware interface { - // Unique ID for the middleware in theBuildStep. The step does not allow - // duplicate IDs. - ID() string - - // Invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( - out BuildOutput, metadata Metadata, err error, - ) -} - -// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided, -// and the func to be invoked. -func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware { - return buildMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type buildMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) -} - -// ID returns the unique ID for the middleware. -func (s buildMiddlewareFunc) ID() string { return s.id } - -// HandleBuild invokes the middleware Fn. -func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( - out BuildOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ BuildMiddleware = (buildMiddlewareFunc{}) - -// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on -// a handler. -type BuildStep struct { - ids *orderedIDs -} - -// NewBuildStep returns a BuildStep ready to have middleware for -// initialization added to it. -func NewBuildStep() *BuildStep { - return &BuildStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*BuildStep)(nil) - -// ID returns the unique name of the step as a middleware. -func (s *BuildStep) ID() string { - return "Build stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h BuildHandler = buildWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedBuildHandler{ - Next: h, - With: order[i].(BuildMiddleware), - } - } - - sIn := BuildInput{ - Request: in, - } - - res, metadata, err := h.HandleBuild(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(BuildMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware id. -// Returns an error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or an error if the middleware to be removed -// doesn't exist. -func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(BuildMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *BuildStep) Remove(id string) (BuildMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(BuildMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *BuildStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *BuildStep) Clear() { - s.ids.Clear() -} - -type buildWrapHandler struct { - Next Handler -} - -var _ BuildHandler = (*buildWrapHandler)(nil) - -// Implements BuildHandler, converts types and delegates to underlying -// generic handler. -func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return BuildOutput{ - Result: res, - }, metadata, err -} - -type decoratedBuildHandler struct { - Next BuildHandler - With BuildMiddleware -} - -var _ BuildHandler = (*decoratedBuildHandler)(nil) - -func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, -) { - return h.With.HandleBuild(ctx, in, h.Next) -} - -// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler. -type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error) - -// HandleBuild invokes the wrapped function with the provided arguments. -func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) { - return b(ctx, in) -} - -var _ BuildHandler = BuildHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go deleted file mode 100644 index 44860721571c..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go +++ /dev/null @@ -1,217 +0,0 @@ -package middleware - -import ( - "context" -) - -// DeserializeInput provides the input parameters for the DeserializeInput to -// consume. DeserializeMiddleware should not modify the Request, and instead -// forward it along to the next DeserializeHandler. -type DeserializeInput struct { - Request interface{} -} - -// DeserializeOutput provides the result returned by the next -// DeserializeHandler. The DeserializeMiddleware should deserialize the -// RawResponse into a Result that can be consumed by middleware higher up in -// the stack. -type DeserializeOutput struct { - RawResponse interface{} - Result interface{} -} - -// DeserializeHandler provides the interface for the next handler the -// DeserializeMiddleware will call in the middleware chain. -type DeserializeHandler interface { - HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, - ) -} - -// DeserializeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next DeserializeHandler for further -// processing. -type DeserializeMiddleware interface { - // ID returns a unique ID for the middleware in the DeserializeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleDeserialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( - out DeserializeOutput, metadata Metadata, err error, - ) -} - -// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID -// provided, and the func to be invoked. -func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { - return deserializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type deserializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, DeserializeInput, DeserializeHandler) ( - DeserializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s deserializeMiddlewareFunc) ID() string { return s.id } - -// HandleDeserialize invokes the middleware Fn. -func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( - out DeserializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) - -// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be -// invoked on a handler. -type DeserializeStep struct { - ids *orderedIDs -} - -// NewDeserializeStep returns a DeserializeStep ready to have middleware for -// initialization added to it. -func NewDeserializeStep() *DeserializeStep { - return &DeserializeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*DeserializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *DeserializeStep) ID() string { - return "Deserialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h DeserializeHandler = deserializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedDeserializeHandler{ - Next: h, - With: order[i].(DeserializeMiddleware), - } - } - - sIn := DeserializeInput{ - Request: in, - } - - res, metadata, err := h.HandleDeserialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(DeserializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(DeserializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(DeserializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *DeserializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *DeserializeStep) Clear() { - s.ids.Clear() -} - -type deserializeWrapHandler struct { - Next Handler -} - -var _ DeserializeHandler = (*deserializeWrapHandler)(nil) - -// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying -// generic handler. -func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, -) { - resp, metadata, err := w.Next.Handle(ctx, in.Request) - return DeserializeOutput{ - RawResponse: resp, - }, metadata, err -} - -type decoratedDeserializeHandler struct { - Next DeserializeHandler - With DeserializeMiddleware -} - -var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil) - -func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, -) { - return h.With.HandleDeserialize(ctx, in, h.Next) -} - -// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler. -type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error) - -// HandleDeserialize invokes the wrapped function with the given arguments. -func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) { - return d(ctx, in) -} - -var _ DeserializeHandler = DeserializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go deleted file mode 100644 index 065e3885de92..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import "context" - -// FinalizeInput provides the input parameters for the FinalizeMiddleware to -// consume. FinalizeMiddleware may modify the Request value before forwarding -// the FinalizeInput along to the next next FinalizeHandler. -type FinalizeInput struct { - Request interface{} -} - -// FinalizeOutput provides the result returned by the next FinalizeHandler. -type FinalizeOutput struct { - Result interface{} -} - -// FinalizeHandler provides the interface for the next handler the -// FinalizeMiddleware will call in the middleware chain. -type FinalizeHandler interface { - HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, - ) -} - -// FinalizeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next FinalizeHandler for further -// processing. -type FinalizeMiddleware interface { - // ID returns a unique ID for the middleware in the FinalizeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleFinalize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( - out FinalizeOutput, metadata Metadata, err error, - ) -} - -// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID -// provided, and the func to be invoked. -func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { - return finalizeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type finalizeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, FinalizeInput, FinalizeHandler) ( - FinalizeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s finalizeMiddlewareFunc) ID() string { return s.id } - -// HandleFinalize invokes the middleware Fn. -func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( - out FinalizeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) - -// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be -// invoked on a handler. -type FinalizeStep struct { - ids *orderedIDs -} - -// NewFinalizeStep returns a FinalizeStep ready to have middleware for -// initialization added to it. -func NewFinalizeStep() *FinalizeStep { - return &FinalizeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*FinalizeStep)(nil) - -// ID returns the unique id of the step as a middleware. -func (s *FinalizeStep) ID() string { - return "Finalize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h FinalizeHandler = finalizeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedFinalizeHandler{ - Next: h, - With: order[i].(FinalizeMiddleware), - } - } - - sIn := FinalizeInput{ - Request: in, - } - - res, metadata, err := h.HandleFinalize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(FinalizeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(FinalizeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(FinalizeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *FinalizeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *FinalizeStep) Clear() { - s.ids.Clear() -} - -type finalizeWrapHandler struct { - Next Handler -} - -var _ FinalizeHandler = (*finalizeWrapHandler)(nil) - -// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying -// generic handler. -func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return FinalizeOutput{ - Result: res, - }, metadata, err -} - -type decoratedFinalizeHandler struct { - Next FinalizeHandler - With FinalizeMiddleware -} - -var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) - -func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, -) { - return h.With.HandleFinalize(ctx, in, h.Next) -} - -// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. -type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) - -// HandleFinalize invokes the wrapped function with the given arguments. -func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { - return f(ctx, in) -} - -var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go deleted file mode 100644 index fe359144d243..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import "context" - -// InitializeInput wraps the input parameters for the InitializeMiddlewares to -// consume. InitializeMiddleware may modify the parameter value before -// forwarding it along to the next InitializeHandler. -type InitializeInput struct { - Parameters interface{} -} - -// InitializeOutput provides the result returned by the next InitializeHandler. -type InitializeOutput struct { - Result interface{} -} - -// InitializeHandler provides the interface for the next handler the -// InitializeMiddleware will call in the middleware chain. -type InitializeHandler interface { - HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, - ) -} - -// InitializeMiddleware provides the interface for middleware specific to the -// initialize step. Delegates to the next InitializeHandler for further -// processing. -type InitializeMiddleware interface { - // ID returns a unique ID for the middleware in the InitializeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleInitialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, - ) -} - -// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, -// and the func to be invoked. -func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { - return initializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type initializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, InitializeInput, InitializeHandler) ( - InitializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s initializeMiddlewareFunc) ID() string { return s.id } - -// HandleInitialize invokes the middleware Fn. -func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ InitializeMiddleware = (initializeMiddlewareFunc{}) - -// InitializeStep provides the ordered grouping of InitializeMiddleware to be -// invoked on a handler. -type InitializeStep struct { - ids *orderedIDs -} - -// NewInitializeStep returns an InitializeStep ready to have middleware for -// initialization added to it. -func NewInitializeStep() *InitializeStep { - return &InitializeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*InitializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *InitializeStep) ID() string { - return "Initialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h InitializeHandler = initializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedInitializeHandler{ - Next: h, - With: order[i].(InitializeMiddleware), - } - } - - sIn := InitializeInput{ - Parameters: in, - } - - res, metadata, err := h.HandleInitialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(InitializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(InitializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(InitializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *InitializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *InitializeStep) Clear() { - s.ids.Clear() -} - -type initializeWrapHandler struct { - Next Handler -} - -var _ InitializeHandler = (*initializeWrapHandler)(nil) - -// HandleInitialize implements InitializeHandler, converts types and delegates to underlying -// generic handler. -func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Parameters) - return InitializeOutput{ - Result: res, - }, metadata, err -} - -type decoratedInitializeHandler struct { - Next InitializeHandler - With InitializeMiddleware -} - -var _ InitializeHandler = (*decoratedInitializeHandler)(nil) - -func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, -) { - return h.With.HandleInitialize(ctx, in, h.Next) -} - -// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. -type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) - -// HandleInitialize calls the wrapped function with the provided arguments. -func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { - return i(ctx, in) -} - -var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go deleted file mode 100644 index 114bafcedea8..000000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go +++ /dev/null @@ -1,219 +0,0 @@ -package middleware - -import "context" - -// SerializeInput provides the input parameters for the SerializeMiddleware to -// consume. SerializeMiddleware may modify the Request value before forwarding -// SerializeInput along to the next SerializeHandler. The Parameters member -// should not be modified by SerializeMiddleware, InitializeMiddleware should -// be responsible for modifying the provided Parameter value. -type SerializeInput struct { - Parameters interface{} - Request interface{} -} - -// SerializeOutput provides the result returned by the next SerializeHandler. -type SerializeOutput struct { - Result interface{} -} - -// SerializeHandler provides the interface for the next handler the -// SerializeMiddleware will call in the middleware chain. -type SerializeHandler interface { - HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, - ) -} - -// SerializeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next SerializeHandler for further -// processing. -type SerializeMiddleware interface { - // ID returns a unique ID for the middleware in the SerializeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleSerialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( - out SerializeOutput, metadata Metadata, err error, - ) -} - -// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID -// provided, and the func to be invoked. -func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { - return serializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type serializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, SerializeInput, SerializeHandler) ( - SerializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s serializeMiddlewareFunc) ID() string { return s.id } - -// HandleSerialize invokes the middleware Fn. -func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( - out SerializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ SerializeMiddleware = (serializeMiddlewareFunc{}) - -// SerializeStep provides the ordered grouping of SerializeMiddleware to be -// invoked on a handler. -type SerializeStep struct { - newRequest func() interface{} - ids *orderedIDs -} - -// NewSerializeStep returns a SerializeStep ready to have middleware for -// initialization added to it. The newRequest func parameter is used to -// initialize the transport specific request for the stack SerializeStep to -// serialize the input parameters into. -func NewSerializeStep(newRequest func() interface{}) *SerializeStep { - return &SerializeStep{ - ids: newOrderedIDs(), - newRequest: newRequest, - } -} - -var _ Middleware = (*SerializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *SerializeStep) ID() string { - return "Serialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h SerializeHandler = serializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedSerializeHandler{ - Next: h, - With: order[i].(SerializeMiddleware), - } - } - - sIn := SerializeInput{ - Parameters: in, - Request: s.newRequest(), - } - - res, metadata, err := h.HandleSerialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(SerializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(SerializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(SerializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *SerializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *SerializeStep) Clear() { - s.ids.Clear() -} - -type serializeWrapHandler struct { - Next Handler -} - -var _ SerializeHandler = (*serializeWrapHandler)(nil) - -// Implements SerializeHandler, converts types and delegates to underlying -// generic handler. -func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return SerializeOutput{ - Result: res, - }, metadata, err -} - -type decoratedSerializeHandler struct { - Next SerializeHandler - With SerializeMiddleware -} - -var _ SerializeHandler = (*decoratedSerializeHandler)(nil) - -func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, -) { - return h.With.HandleSerialize(ctx, in, h.Next) -} - -// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. -type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) - -// HandleSerialize calls the wrapped function with the provided arguments. -func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { - return s(ctx, in) -} - -var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml deleted file mode 100644 index aac582fa2ce5..000000000000 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ /dev/null @@ -1,9 +0,0 @@ -[dependencies] - -[modules] - - [modules.codegen] - no_tag = true - - [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"] - no_tag = true diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go deleted file mode 100644 index 004d78f21360..000000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go +++ /dev/null @@ -1,30 +0,0 @@ -package requestcompression - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" -) - -func gzipCompress(input io.Reader) ([]byte, error) { - var b bytes.Buffer - w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression) - if err != nil { - return nil, fmt.Errorf("failed to create gzip writer, %v", err) - } - - inBytes, err := io.ReadAll(input) - if err != nil { - return nil, fmt.Errorf("failed read payload to compress, %v", err) - } - - if _, err = w.Write(inBytes); err != nil { - return nil, fmt.Errorf("failed to write payload to be compressed, %v", err) - } - if err = w.Close(); err != nil { - return nil, fmt.Errorf("failed to flush payload being compressed, %v", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go deleted file mode 100644 index 06c16afc1130..000000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go +++ /dev/null @@ -1,52 +0,0 @@ -package requestcompression - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "net/http" -) - -const captureUncompressedRequestID = "CaptureUncompressedRequest" - -// AddCaptureUncompressedRequestMiddleware captures http request before compress encoding for check -func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error { - return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{ - buf: buf, - }, "RequestCompression", middleware.Before) -} - -type captureUncompressedRequestMiddleware struct { - req *http.Request - buf *bytes.Buffer - bytes []byte -} - -// ID returns id of the captureUncompressedRequestMiddleware -func (*captureUncompressedRequestMiddleware) ID() string { - return captureUncompressedRequestID -} - -// HandleSerialize captures request payload before it is compressed by request compression middleware -func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - output middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, fmt.Errorf("error when retrieving http request") - } - - _, err = io.Copy(m.buf, request.GetStream()) - if err != nil { - return output, metadata, fmt.Errorf("error when copying http request stream: %q", err) - } - if err = request.RewindStream(); err != nil { - return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err) - } - - return next.HandleSerialize(ctx, input) -} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go deleted file mode 100644 index 7c41476039dd..000000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go +++ /dev/null @@ -1,103 +0,0 @@ -// Package requestcompression implements runtime support for smithy-modeled -// request compression. -// -// This package is designated as private and is intended for use only by the -// smithy client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package requestcompression - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - "io" -) - -const MaxRequestMinCompressSizeBytes = 10485760 - -// Enumeration values for supported compress Algorithms. -const ( - GZIP = "gzip" -) - -type compressFunc func(io.Reader) ([]byte, error) - -var allowedAlgorithms = map[string]compressFunc{ - GZIP: gzipCompress, -} - -// AddRequestCompression add requestCompression middleware to op stack -func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error { - return stack.Serialize.Add(&requestCompression{ - disableRequestCompression: disabled, - requestMinCompressSizeBytes: minBytes, - compressAlgorithms: algorithms, - }, middleware.After) -} - -type requestCompression struct { - disableRequestCompression bool - requestMinCompressSizeBytes int64 - compressAlgorithms []string -} - -// ID returns the ID of the middleware -func (m requestCompression) ID() string { - return "RequestCompression" -} - -// HandleSerialize gzip compress the request's stream/body if enabled by config fields -func (m requestCompression) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if m.disableRequestCompression { - return next.HandleSerialize(ctx, in) - } - // still need to check requestMinCompressSizeBytes in case it is out of range after service client config - if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes { - return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - for _, algorithm := range m.compressAlgorithms { - compressFunc := allowedAlgorithms[algorithm] - if compressFunc != nil { - if stream := req.GetStream(); stream != nil { - size, found, err := req.StreamLength() - if err != nil { - return out, metadata, fmt.Errorf("error while finding request stream length, %v", err) - } else if !found || size < m.requestMinCompressSizeBytes { - return next.HandleSerialize(ctx, in) - } - - compressedBytes, err := compressFunc(stream) - if err != nil { - return out, metadata, fmt.Errorf("failed to compress request stream, %v", err) - } - - var newReq *http.Request - if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil { - return out, metadata, fmt.Errorf("failed to set request stream, %v", err) - } - *req = *newReq - - if val := req.Header.Get("Content-Encoding"); val != "" { - req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm)) - } else { - req.Header.Set("Content-Encoding", algorithm) - } - } - break - } - } - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go deleted file mode 100644 index 68df4c4e0e2b..000000000000 --- a/vendor/github.com/aws/smithy-go/properties.go +++ /dev/null @@ -1,69 +0,0 @@ -package smithy - -import "maps" - -// PropertiesReader provides an interface for reading metadata from the -// underlying metadata container. -type PropertiesReader interface { - Get(key any) any -} - -// Properties provides storing and reading metadata values. Keys may be any -// comparable value type. Get and Set will panic if a key is not comparable. -// -// The zero value for a Properties instance is ready for reads/writes without -// any additional initialization. -type Properties struct { - values map[any]any -} - -// Get attempts to retrieve the value the key points to. Returns nil if the -// key was not found. -// -// Panics if key type is not comparable. -func (m *Properties) Get(key any) any { - m.lazyInit() - return m.values[key] -} - -// Set stores the value pointed to by the key. If a value already exists at -// that key it will be replaced with the new value. -// -// Panics if the key type is not comparable. -func (m *Properties) Set(key, value any) { - m.lazyInit() - m.values[key] = value -} - -// Has returns whether the key exists in the metadata. -// -// Panics if the key type is not comparable. -func (m *Properties) Has(key any) bool { - m.lazyInit() - _, ok := m.values[key] - return ok -} - -// SetAll accepts all of the given Properties into the receiver, overwriting -// any existing keys in the case of conflicts. -func (m *Properties) SetAll(other *Properties) { - if other.values == nil { - return - } - - m.lazyInit() - for k, v := range other.values { - m.values[k] = v - } -} - -// Values returns a shallow clone of the property set's values. -func (m *Properties) Values() map[any]any { - return maps.Clone(m.values) -} - -func (m *Properties) lazyInit() { - if m.values == nil { - m.values = map[any]any{} - } -} diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go deleted file mode 100644 index bc1f6996161a..000000000000 --- a/vendor/github.com/aws/smithy-go/ptr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. -package ptr - -//go:generate go run -tags codegen generate.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go deleted file mode 100644 index a2845bb2c803..000000000000 --- a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go +++ /dev/null @@ -1,601 +0,0 @@ -// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. -package ptr - -import ( - "time" -) - -// ToBool returns bool value dereferenced if the passed -// in pointer was not nil. Returns a bool zero value if the -// pointer was nil. -func ToBool(p *bool) (v bool) { - if p == nil { - return v - } - - return *p -} - -// ToBoolSlice returns a slice of bool values, that are -// dereferenced if the passed in pointer was not nil. Returns a bool -// zero value if the pointer was nil. -func ToBoolSlice(vs []*bool) []bool { - ps := make([]bool, len(vs)) - for i, v := range vs { - ps[i] = ToBool(v) - } - - return ps -} - -// ToBoolMap returns a map of bool values, that are -// dereferenced if the passed in pointer was not nil. The bool -// zero value is used if the pointer was nil. -func ToBoolMap(vs map[string]*bool) map[string]bool { - ps := make(map[string]bool, len(vs)) - for k, v := range vs { - ps[k] = ToBool(v) - } - - return ps -} - -// ToByte returns byte value dereferenced if the passed -// in pointer was not nil. Returns a byte zero value if the -// pointer was nil. -func ToByte(p *byte) (v byte) { - if p == nil { - return v - } - - return *p -} - -// ToByteSlice returns a slice of byte values, that are -// dereferenced if the passed in pointer was not nil. Returns a byte -// zero value if the pointer was nil. -func ToByteSlice(vs []*byte) []byte { - ps := make([]byte, len(vs)) - for i, v := range vs { - ps[i] = ToByte(v) - } - - return ps -} - -// ToByteMap returns a map of byte values, that are -// dereferenced if the passed in pointer was not nil. The byte -// zero value is used if the pointer was nil. -func ToByteMap(vs map[string]*byte) map[string]byte { - ps := make(map[string]byte, len(vs)) - for k, v := range vs { - ps[k] = ToByte(v) - } - - return ps -} - -// ToString returns string value dereferenced if the passed -// in pointer was not nil. Returns a string zero value if the -// pointer was nil. -func ToString(p *string) (v string) { - if p == nil { - return v - } - - return *p -} - -// ToStringSlice returns a slice of string values, that are -// dereferenced if the passed in pointer was not nil. Returns a string -// zero value if the pointer was nil. -func ToStringSlice(vs []*string) []string { - ps := make([]string, len(vs)) - for i, v := range vs { - ps[i] = ToString(v) - } - - return ps -} - -// ToStringMap returns a map of string values, that are -// dereferenced if the passed in pointer was not nil. The string -// zero value is used if the pointer was nil. -func ToStringMap(vs map[string]*string) map[string]string { - ps := make(map[string]string, len(vs)) - for k, v := range vs { - ps[k] = ToString(v) - } - - return ps -} - -// ToInt returns int value dereferenced if the passed -// in pointer was not nil. Returns a int zero value if the -// pointer was nil. -func ToInt(p *int) (v int) { - if p == nil { - return v - } - - return *p -} - -// ToIntSlice returns a slice of int values, that are -// dereferenced if the passed in pointer was not nil. Returns a int -// zero value if the pointer was nil. -func ToIntSlice(vs []*int) []int { - ps := make([]int, len(vs)) - for i, v := range vs { - ps[i] = ToInt(v) - } - - return ps -} - -// ToIntMap returns a map of int values, that are -// dereferenced if the passed in pointer was not nil. The int -// zero value is used if the pointer was nil. -func ToIntMap(vs map[string]*int) map[string]int { - ps := make(map[string]int, len(vs)) - for k, v := range vs { - ps[k] = ToInt(v) - } - - return ps -} - -// ToInt8 returns int8 value dereferenced if the passed -// in pointer was not nil. Returns a int8 zero value if the -// pointer was nil. -func ToInt8(p *int8) (v int8) { - if p == nil { - return v - } - - return *p -} - -// ToInt8Slice returns a slice of int8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int8 -// zero value if the pointer was nil. -func ToInt8Slice(vs []*int8) []int8 { - ps := make([]int8, len(vs)) - for i, v := range vs { - ps[i] = ToInt8(v) - } - - return ps -} - -// ToInt8Map returns a map of int8 values, that are -// dereferenced if the passed in pointer was not nil. The int8 -// zero value is used if the pointer was nil. -func ToInt8Map(vs map[string]*int8) map[string]int8 { - ps := make(map[string]int8, len(vs)) - for k, v := range vs { - ps[k] = ToInt8(v) - } - - return ps -} - -// ToInt16 returns int16 value dereferenced if the passed -// in pointer was not nil. Returns a int16 zero value if the -// pointer was nil. -func ToInt16(p *int16) (v int16) { - if p == nil { - return v - } - - return *p -} - -// ToInt16Slice returns a slice of int16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int16 -// zero value if the pointer was nil. -func ToInt16Slice(vs []*int16) []int16 { - ps := make([]int16, len(vs)) - for i, v := range vs { - ps[i] = ToInt16(v) - } - - return ps -} - -// ToInt16Map returns a map of int16 values, that are -// dereferenced if the passed in pointer was not nil. The int16 -// zero value is used if the pointer was nil. -func ToInt16Map(vs map[string]*int16) map[string]int16 { - ps := make(map[string]int16, len(vs)) - for k, v := range vs { - ps[k] = ToInt16(v) - } - - return ps -} - -// ToInt32 returns int32 value dereferenced if the passed -// in pointer was not nil. Returns a int32 zero value if the -// pointer was nil. -func ToInt32(p *int32) (v int32) { - if p == nil { - return v - } - - return *p -} - -// ToInt32Slice returns a slice of int32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int32 -// zero value if the pointer was nil. -func ToInt32Slice(vs []*int32) []int32 { - ps := make([]int32, len(vs)) - for i, v := range vs { - ps[i] = ToInt32(v) - } - - return ps -} - -// ToInt32Map returns a map of int32 values, that are -// dereferenced if the passed in pointer was not nil. The int32 -// zero value is used if the pointer was nil. -func ToInt32Map(vs map[string]*int32) map[string]int32 { - ps := make(map[string]int32, len(vs)) - for k, v := range vs { - ps[k] = ToInt32(v) - } - - return ps -} - -// ToInt64 returns int64 value dereferenced if the passed -// in pointer was not nil. Returns a int64 zero value if the -// pointer was nil. -func ToInt64(p *int64) (v int64) { - if p == nil { - return v - } - - return *p -} - -// ToInt64Slice returns a slice of int64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int64 -// zero value if the pointer was nil. -func ToInt64Slice(vs []*int64) []int64 { - ps := make([]int64, len(vs)) - for i, v := range vs { - ps[i] = ToInt64(v) - } - - return ps -} - -// ToInt64Map returns a map of int64 values, that are -// dereferenced if the passed in pointer was not nil. The int64 -// zero value is used if the pointer was nil. -func ToInt64Map(vs map[string]*int64) map[string]int64 { - ps := make(map[string]int64, len(vs)) - for k, v := range vs { - ps[k] = ToInt64(v) - } - - return ps -} - -// ToUint returns uint value dereferenced if the passed -// in pointer was not nil. Returns a uint zero value if the -// pointer was nil. -func ToUint(p *uint) (v uint) { - if p == nil { - return v - } - - return *p -} - -// ToUintSlice returns a slice of uint values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint -// zero value if the pointer was nil. -func ToUintSlice(vs []*uint) []uint { - ps := make([]uint, len(vs)) - for i, v := range vs { - ps[i] = ToUint(v) - } - - return ps -} - -// ToUintMap returns a map of uint values, that are -// dereferenced if the passed in pointer was not nil. The uint -// zero value is used if the pointer was nil. -func ToUintMap(vs map[string]*uint) map[string]uint { - ps := make(map[string]uint, len(vs)) - for k, v := range vs { - ps[k] = ToUint(v) - } - - return ps -} - -// ToUint8 returns uint8 value dereferenced if the passed -// in pointer was not nil. Returns a uint8 zero value if the -// pointer was nil. -func ToUint8(p *uint8) (v uint8) { - if p == nil { - return v - } - - return *p -} - -// ToUint8Slice returns a slice of uint8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint8 -// zero value if the pointer was nil. -func ToUint8Slice(vs []*uint8) []uint8 { - ps := make([]uint8, len(vs)) - for i, v := range vs { - ps[i] = ToUint8(v) - } - - return ps -} - -// ToUint8Map returns a map of uint8 values, that are -// dereferenced if the passed in pointer was not nil. The uint8 -// zero value is used if the pointer was nil. -func ToUint8Map(vs map[string]*uint8) map[string]uint8 { - ps := make(map[string]uint8, len(vs)) - for k, v := range vs { - ps[k] = ToUint8(v) - } - - return ps -} - -// ToUint16 returns uint16 value dereferenced if the passed -// in pointer was not nil. Returns a uint16 zero value if the -// pointer was nil. -func ToUint16(p *uint16) (v uint16) { - if p == nil { - return v - } - - return *p -} - -// ToUint16Slice returns a slice of uint16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint16 -// zero value if the pointer was nil. -func ToUint16Slice(vs []*uint16) []uint16 { - ps := make([]uint16, len(vs)) - for i, v := range vs { - ps[i] = ToUint16(v) - } - - return ps -} - -// ToUint16Map returns a map of uint16 values, that are -// dereferenced if the passed in pointer was not nil. The uint16 -// zero value is used if the pointer was nil. -func ToUint16Map(vs map[string]*uint16) map[string]uint16 { - ps := make(map[string]uint16, len(vs)) - for k, v := range vs { - ps[k] = ToUint16(v) - } - - return ps -} - -// ToUint32 returns uint32 value dereferenced if the passed -// in pointer was not nil. Returns a uint32 zero value if the -// pointer was nil. -func ToUint32(p *uint32) (v uint32) { - if p == nil { - return v - } - - return *p -} - -// ToUint32Slice returns a slice of uint32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint32 -// zero value if the pointer was nil. -func ToUint32Slice(vs []*uint32) []uint32 { - ps := make([]uint32, len(vs)) - for i, v := range vs { - ps[i] = ToUint32(v) - } - - return ps -} - -// ToUint32Map returns a map of uint32 values, that are -// dereferenced if the passed in pointer was not nil. The uint32 -// zero value is used if the pointer was nil. -func ToUint32Map(vs map[string]*uint32) map[string]uint32 { - ps := make(map[string]uint32, len(vs)) - for k, v := range vs { - ps[k] = ToUint32(v) - } - - return ps -} - -// ToUint64 returns uint64 value dereferenced if the passed -// in pointer was not nil. Returns a uint64 zero value if the -// pointer was nil. -func ToUint64(p *uint64) (v uint64) { - if p == nil { - return v - } - - return *p -} - -// ToUint64Slice returns a slice of uint64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint64 -// zero value if the pointer was nil. -func ToUint64Slice(vs []*uint64) []uint64 { - ps := make([]uint64, len(vs)) - for i, v := range vs { - ps[i] = ToUint64(v) - } - - return ps -} - -// ToUint64Map returns a map of uint64 values, that are -// dereferenced if the passed in pointer was not nil. The uint64 -// zero value is used if the pointer was nil. -func ToUint64Map(vs map[string]*uint64) map[string]uint64 { - ps := make(map[string]uint64, len(vs)) - for k, v := range vs { - ps[k] = ToUint64(v) - } - - return ps -} - -// ToFloat32 returns float32 value dereferenced if the passed -// in pointer was not nil. Returns a float32 zero value if the -// pointer was nil. -func ToFloat32(p *float32) (v float32) { - if p == nil { - return v - } - - return *p -} - -// ToFloat32Slice returns a slice of float32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float32 -// zero value if the pointer was nil. -func ToFloat32Slice(vs []*float32) []float32 { - ps := make([]float32, len(vs)) - for i, v := range vs { - ps[i] = ToFloat32(v) - } - - return ps -} - -// ToFloat32Map returns a map of float32 values, that are -// dereferenced if the passed in pointer was not nil. The float32 -// zero value is used if the pointer was nil. -func ToFloat32Map(vs map[string]*float32) map[string]float32 { - ps := make(map[string]float32, len(vs)) - for k, v := range vs { - ps[k] = ToFloat32(v) - } - - return ps -} - -// ToFloat64 returns float64 value dereferenced if the passed -// in pointer was not nil. Returns a float64 zero value if the -// pointer was nil. -func ToFloat64(p *float64) (v float64) { - if p == nil { - return v - } - - return *p -} - -// ToFloat64Slice returns a slice of float64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float64 -// zero value if the pointer was nil. -func ToFloat64Slice(vs []*float64) []float64 { - ps := make([]float64, len(vs)) - for i, v := range vs { - ps[i] = ToFloat64(v) - } - - return ps -} - -// ToFloat64Map returns a map of float64 values, that are -// dereferenced if the passed in pointer was not nil. The float64 -// zero value is used if the pointer was nil. -func ToFloat64Map(vs map[string]*float64) map[string]float64 { - ps := make(map[string]float64, len(vs)) - for k, v := range vs { - ps[k] = ToFloat64(v) - } - - return ps -} - -// ToTime returns time.Time value dereferenced if the passed -// in pointer was not nil. Returns a time.Time zero value if the -// pointer was nil. -func ToTime(p *time.Time) (v time.Time) { - if p == nil { - return v - } - - return *p -} - -// ToTimeSlice returns a slice of time.Time values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Time -// zero value if the pointer was nil. -func ToTimeSlice(vs []*time.Time) []time.Time { - ps := make([]time.Time, len(vs)) - for i, v := range vs { - ps[i] = ToTime(v) - } - - return ps -} - -// ToTimeMap returns a map of time.Time values, that are -// dereferenced if the passed in pointer was not nil. The time.Time -// zero value is used if the pointer was nil. -func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { - ps := make(map[string]time.Time, len(vs)) - for k, v := range vs { - ps[k] = ToTime(v) - } - - return ps -} - -// ToDuration returns time.Duration value dereferenced if the passed -// in pointer was not nil. Returns a time.Duration zero value if the -// pointer was nil. -func ToDuration(p *time.Duration) (v time.Duration) { - if p == nil { - return v - } - - return *p -} - -// ToDurationSlice returns a slice of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Duration -// zero value if the pointer was nil. -func ToDurationSlice(vs []*time.Duration) []time.Duration { - ps := make([]time.Duration, len(vs)) - for i, v := range vs { - ps[i] = ToDuration(v) - } - - return ps -} - -// ToDurationMap returns a map of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. The time.Duration -// zero value is used if the pointer was nil. -func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { - ps := make(map[string]time.Duration, len(vs)) - for k, v := range vs { - ps[k] = ToDuration(v) - } - - return ps -} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go deleted file mode 100644 index 97f01011e7ea..000000000000 --- a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build codegen -// +build codegen - -package ptr - -import "strings" - -func GetScalars() Scalars { - return Scalars{ - {Type: "bool"}, - {Type: "byte"}, - {Type: "string"}, - {Type: "int"}, - {Type: "int8"}, - {Type: "int16"}, - {Type: "int32"}, - {Type: "int64"}, - {Type: "uint"}, - {Type: "uint8"}, - {Type: "uint16"}, - {Type: "uint32"}, - {Type: "uint64"}, - {Type: "float32"}, - {Type: "float64"}, - {Type: "Time", Import: &Import{Path: "time"}}, - {Type: "Duration", Import: &Import{Path: "time"}}, - } -} - -// Import provides the import path and optional alias -type Import struct { - Path string - Alias string -} - -// Package returns the Go package name for the import. Returns alias if set. -func (i Import) Package() string { - if v := i.Alias; len(v) != 0 { - return v - } - - if v := i.Path; len(v) != 0 { - parts := strings.Split(v, "/") - pkg := parts[len(parts)-1] - return pkg - } - - return "" -} - -// Scalar provides the definition of a type to generate pointer utilities for. -type Scalar struct { - Type string - Import *Import -} - -// Name returns the exported function name for the type. -func (t Scalar) Name() string { - return strings.Title(t.Type) -} - -// Symbol returns the scalar's Go symbol with path if needed. -func (t Scalar) Symbol() string { - if t.Import != nil { - return t.Import.Package() + "." + t.Type - } - return t.Type -} - -// Scalars is a list of scalars. -type Scalars []Scalar - -// Imports returns all imports for the scalars. -func (ts Scalars) Imports() []*Import { - imports := []*Import{} - for _, t := range ts { - if v := t.Import; v != nil { - imports = append(imports, v) - } - } - - return imports -} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go deleted file mode 100644 index 0bfbbecbdce0..000000000000 --- a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go +++ /dev/null @@ -1,499 +0,0 @@ -// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. -package ptr - -import ( - "time" -) - -// Bool returns a pointer value for the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolSlice returns a slice of bool pointers from the values -// passed in. -func BoolSlice(vs []bool) []*bool { - ps := make([]*bool, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// BoolMap returns a map of bool pointers from the values -// passed in. -func BoolMap(vs map[string]bool) map[string]*bool { - ps := make(map[string]*bool, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Byte returns a pointer value for the byte value passed in. -func Byte(v byte) *byte { - return &v -} - -// ByteSlice returns a slice of byte pointers from the values -// passed in. -func ByteSlice(vs []byte) []*byte { - ps := make([]*byte, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// ByteMap returns a map of byte pointers from the values -// passed in. -func ByteMap(vs map[string]byte) map[string]*byte { - ps := make(map[string]*byte, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// String returns a pointer value for the string value passed in. -func String(v string) *string { - return &v -} - -// StringSlice returns a slice of string pointers from the values -// passed in. -func StringSlice(vs []string) []*string { - ps := make([]*string, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// StringMap returns a map of string pointers from the values -// passed in. -func StringMap(vs map[string]string) map[string]*string { - ps := make(map[string]*string, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int returns a pointer value for the int value passed in. -func Int(v int) *int { - return &v -} - -// IntSlice returns a slice of int pointers from the values -// passed in. -func IntSlice(vs []int) []*int { - ps := make([]*int, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// IntMap returns a map of int pointers from the values -// passed in. -func IntMap(vs map[string]int) map[string]*int { - ps := make(map[string]*int, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int8 returns a pointer value for the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Slice returns a slice of int8 pointers from the values -// passed in. -func Int8Slice(vs []int8) []*int8 { - ps := make([]*int8, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int8Map returns a map of int8 pointers from the values -// passed in. -func Int8Map(vs map[string]int8) map[string]*int8 { - ps := make(map[string]*int8, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int16 returns a pointer value for the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Slice returns a slice of int16 pointers from the values -// passed in. -func Int16Slice(vs []int16) []*int16 { - ps := make([]*int16, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int16Map returns a map of int16 pointers from the values -// passed in. -func Int16Map(vs map[string]int16) map[string]*int16 { - ps := make(map[string]*int16, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int32 returns a pointer value for the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Slice returns a slice of int32 pointers from the values -// passed in. -func Int32Slice(vs []int32) []*int32 { - ps := make([]*int32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int32Map returns a map of int32 pointers from the values -// passed in. -func Int32Map(vs map[string]int32) map[string]*int32 { - ps := make(map[string]*int32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int64 returns a pointer value for the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Slice returns a slice of int64 pointers from the values -// passed in. -func Int64Slice(vs []int64) []*int64 { - ps := make([]*int64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int64Map returns a map of int64 pointers from the values -// passed in. -func Int64Map(vs map[string]int64) map[string]*int64 { - ps := make(map[string]*int64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint returns a pointer value for the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintSlice returns a slice of uint pointers from the values -// passed in. -func UintSlice(vs []uint) []*uint { - ps := make([]*uint, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// UintMap returns a map of uint pointers from the values -// passed in. -func UintMap(vs map[string]uint) map[string]*uint { - ps := make(map[string]*uint, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint8 returns a pointer value for the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Slice returns a slice of uint8 pointers from the values -// passed in. -func Uint8Slice(vs []uint8) []*uint8 { - ps := make([]*uint8, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint8Map returns a map of uint8 pointers from the values -// passed in. -func Uint8Map(vs map[string]uint8) map[string]*uint8 { - ps := make(map[string]*uint8, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint16 returns a pointer value for the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Slice returns a slice of uint16 pointers from the values -// passed in. -func Uint16Slice(vs []uint16) []*uint16 { - ps := make([]*uint16, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint16Map returns a map of uint16 pointers from the values -// passed in. -func Uint16Map(vs map[string]uint16) map[string]*uint16 { - ps := make(map[string]*uint16, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint32 returns a pointer value for the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Slice returns a slice of uint32 pointers from the values -// passed in. -func Uint32Slice(vs []uint32) []*uint32 { - ps := make([]*uint32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint32Map returns a map of uint32 pointers from the values -// passed in. -func Uint32Map(vs map[string]uint32) map[string]*uint32 { - ps := make(map[string]*uint32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint64 returns a pointer value for the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Slice returns a slice of uint64 pointers from the values -// passed in. -func Uint64Slice(vs []uint64) []*uint64 { - ps := make([]*uint64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint64Map returns a map of uint64 pointers from the values -// passed in. -func Uint64Map(vs map[string]uint64) map[string]*uint64 { - ps := make(map[string]*uint64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Float32 returns a pointer value for the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Slice returns a slice of float32 pointers from the values -// passed in. -func Float32Slice(vs []float32) []*float32 { - ps := make([]*float32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Float32Map returns a map of float32 pointers from the values -// passed in. -func Float32Map(vs map[string]float32) map[string]*float32 { - ps := make(map[string]*float32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Float64 returns a pointer value for the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Slice returns a slice of float64 pointers from the values -// passed in. -func Float64Slice(vs []float64) []*float64 { - ps := make([]*float64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Float64Map returns a map of float64 pointers from the values -// passed in. -func Float64Map(vs map[string]float64) map[string]*float64 { - ps := make(map[string]*float64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Time returns a pointer value for the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeSlice returns a slice of time.Time pointers from the values -// passed in. -func TimeSlice(vs []time.Time) []*time.Time { - ps := make([]*time.Time, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// TimeMap returns a map of time.Time pointers from the values -// passed in. -func TimeMap(vs map[string]time.Time) map[string]*time.Time { - ps := make(map[string]*time.Time, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Duration returns a pointer value for the time.Duration value passed in. -func Duration(v time.Duration) *time.Duration { - return &v -} - -// DurationSlice returns a slice of time.Duration pointers from the values -// passed in. -func DurationSlice(vs []time.Duration) []*time.Duration { - ps := make([]*time.Duration, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// DurationMap returns a map of time.Duration pointers from the values -// passed in. -func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { - ps := make(map[string]*time.Duration, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go deleted file mode 100644 index f8b25d56259e..000000000000 --- a/vendor/github.com/aws/smithy-go/rand/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rand provides utilities for creating and working with random value -// generators. -package rand diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go deleted file mode 100644 index 9c479f62b59f..000000000000 --- a/vendor/github.com/aws/smithy-go/rand/rand.go +++ /dev/null @@ -1,31 +0,0 @@ -package rand - -import ( - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func init() { - Reader = rand.Reader -} - -// Reader provides a random reader that can reset during testing. -var Reader io.Reader - -// Int63n returns a int64 between zero and value of max, read from an io.Reader source. -func Int63n(reader io.Reader, max int64) (int64, error) { - bi, err := rand.Int(reader, big.NewInt(max)) - if err != nil { - return 0, fmt.Errorf("failed to read random value, %w", err) - } - - return bi.Int64(), nil -} - -// CryptoRandInt63n returns a random int64 between zero and value of max -// obtained from the crypto rand source. -func CryptoRandInt63n(max int64) (int64, error) { - return Int63n(Reader, max) -} diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go deleted file mode 100644 index dc81cbc68ac0..000000000000 --- a/vendor/github.com/aws/smithy-go/rand/uuid.go +++ /dev/null @@ -1,87 +0,0 @@ -package rand - -import ( - "encoding/hex" - "io" -) - -const dash byte = '-' - -// UUIDIdempotencyToken provides a utility to get idempotency tokens in the -// UUID format. -type UUIDIdempotencyToken struct { - uuid *UUID -} - -// NewUUIDIdempotencyToken returns a idempotency token provider returning -// tokens in the UUID random format using the reader provided. -func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken { - return &UUIDIdempotencyToken{uuid: NewUUID(r)} -} - -// GetIdempotencyToken returns a random UUID value for Idempotency token. -func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) { - return u.uuid.GetUUID() -} - -// UUID provides computing random UUID version 4 values from a random source -// reader. -type UUID struct { - randSrc io.Reader -} - -// NewUUID returns an initialized UUID value that can be used to retrieve -// random UUID version 4 values. -func NewUUID(r io.Reader) *UUID { - return &UUID{randSrc: r} -} - -// GetUUID returns a random UUID version 4 string representation sourced from the random reader the -// UUID was created with. Returns an error if unable to compute the UUID. -func (r *UUID) GetUUID() (string, error) { - var b [16]byte - if _, err := io.ReadFull(r.randSrc, b[:]); err != nil { - return "", err - } - r.makeUUIDv4(b[:]) - return format(b), nil -} - -// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the -// UUID was created with. Returns an error if unable to compute the UUID. -func (r *UUID) GetBytes() (u []byte, err error) { - u = make([]byte, 16) - if _, err = io.ReadFull(r.randSrc, u); err != nil { - return u, err - } - r.makeUUIDv4(u) - return u, nil -} - -func (r *UUID) makeUUIDv4(u []byte) { - // 13th character is "4" - u[6] = (u[6] & 0x0f) | 0x40 // Version 4 - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0 -} - -// Format returns the canonical text representation of a UUID. -// This implementation is optimized to not use fmt. -// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d -func format(u [16]byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - - var scratch [36]byte - - hex.Encode(scratch[:8], u[0:4]) - scratch[8] = dash - hex.Encode(scratch[9:13], u[4:6]) - scratch[13] = dash - hex.Encode(scratch[14:18], u[6:8]) - scratch[18] = dash - hex.Encode(scratch[19:23], u[8:10]) - scratch[23] = dash - hex.Encode(scratch[24:], u[10:]) - - return string(scratch[:]) -} diff --git a/vendor/github.com/aws/smithy-go/sync/error.go b/vendor/github.com/aws/smithy-go/sync/error.go deleted file mode 100644 index 629207672b4a..000000000000 --- a/vendor/github.com/aws/smithy-go/sync/error.go +++ /dev/null @@ -1,53 +0,0 @@ -package sync - -import "sync" - -// OnceErr wraps the behavior of recording an error -// once and signal on a channel when this has occurred. -// Signaling is done by closing of the channel. -// -// Type is safe for concurrent usage. -type OnceErr struct { - mu sync.RWMutex - err error - ch chan struct{} -} - -// NewOnceErr return a new OnceErr -func NewOnceErr() *OnceErr { - return &OnceErr{ - ch: make(chan struct{}, 1), - } -} - -// Err acquires a read-lock and returns an -// error if one has been set. -func (e *OnceErr) Err() error { - e.mu.RLock() - err := e.err - e.mu.RUnlock() - - return err -} - -// SetError acquires a write-lock and will set -// the underlying error value if one has not been set. -func (e *OnceErr) SetError(err error) { - if err == nil { - return - } - - e.mu.Lock() - if e.err == nil { - e.err = err - close(e.ch) - } - e.mu.Unlock() -} - -// ErrorSet returns a channel that will be used to signal -// that an error has been set. This channel will be closed -// when the error value has been set for OnceErr. -func (e *OnceErr) ErrorSet() <-chan struct{} { - return e.ch -} diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go deleted file mode 100644 index b552a09f8a8b..000000000000 --- a/vendor/github.com/aws/smithy-go/time/time.go +++ /dev/null @@ -1,134 +0,0 @@ -package time - -import ( - "context" - "fmt" - "math/big" - "strings" - "time" -) - -const ( - // dateTimeFormat is a IMF-fixdate formatted RFC3339 section 5.6 - dateTimeFormatInput = "2006-01-02T15:04:05.999999999Z" - dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999" - dateTimeFormatOutput = "2006-01-02T15:04:05.999Z" - - // httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1 - // IMF-fixdate with no UTC offset. - httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - // Additional formats needed for compatibility. - httpDateFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" - httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" -) - -var millisecondFloat = big.NewFloat(1e3) - -// FormatDateTime formats value as a date-time, (RFC3339 section 5.6) -// -// Example: 1985-04-12T23:20:50.52Z -func FormatDateTime(value time.Time) string { - return value.UTC().Format(dateTimeFormatOutput) -} - -// ParseDateTime parses a string as a date-time, (RFC3339 section 5.6) -// -// Example: 1985-04-12T23:20:50.52Z -func ParseDateTime(value string) (time.Time, error) { - return tryParse(value, - dateTimeFormatInput, - dateTimeFormatInputNoZ, - time.RFC3339Nano, - time.RFC3339, - ) -} - -// FormatHTTPDate formats value as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate) -// -// Example: Tue, 29 Apr 2014 18:30:38 GMT -func FormatHTTPDate(value time.Time) string { - return value.UTC().Format(httpDateFormat) -} - -// ParseHTTPDate parses a string as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate) -// -// Example: Tue, 29 Apr 2014 18:30:38 GMT -func ParseHTTPDate(value string) (time.Time, error) { - return tryParse(value, - httpDateFormat, - httpDateFormatSingleDigitDay, - httpDateFormatSingleDigitDayTwoDigitYear, - time.RFC850, - time.ANSIC, - ) -} - -// FormatEpochSeconds returns value as a Unix time in seconds with with decimal precision -// -// Example: 1515531081.123 -func FormatEpochSeconds(value time.Time) float64 { - ms := value.UnixNano() / int64(time.Millisecond) - return float64(ms) / 1e3 -} - -// ParseEpochSeconds returns value as a Unix time in seconds with with decimal precision -// -// Example: 1515531081.123 -func ParseEpochSeconds(value float64) time.Time { - f := big.NewFloat(value) - f = f.Mul(f, millisecondFloat) - i, _ := f.Int64() - // Offset to `UTC` because time.Unix returns the time value based on system - // local setting. - return time.Unix(0, i*1e6).UTC() -} - -func tryParse(v string, formats ...string) (time.Time, error) { - var errs parseErrors - for _, f := range formats { - t, err := time.Parse(f, v) - if err != nil { - errs = append(errs, parseError{ - Format: f, - Err: err, - }) - continue - } - return t, nil - } - - return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs) -} - -type parseErrors []parseError - -func (es parseErrors) Error() string { - var s strings.Builder - for _, e := range es { - fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) - } - - return "parse errors:" + s.String() -} - -type parseError struct { - Format string - Err error -} - -// SleepWithContext will wait for the timer duration to expire, or until the context -// is canceled. Whichever happens first. If the context is canceled the -// Context's error will be returned. -func SleepWithContext(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/smithy-go/tracing/context.go b/vendor/github.com/aws/smithy-go/tracing/context.go deleted file mode 100644 index a404ed9d37de..000000000000 --- a/vendor/github.com/aws/smithy-go/tracing/context.go +++ /dev/null @@ -1,96 +0,0 @@ -package tracing - -import "context" - -type ( - operationTracerKey struct{} - spanLineageKey struct{} -) - -// GetSpan returns the active trace Span on the context. -// -// The boolean in the return indicates whether a Span was actually in the -// context, but a no-op implementation will be returned if not, so callers -// can generally disregard the boolean unless they wish to explicitly confirm -// presence/absence of a Span. -func GetSpan(ctx context.Context) (Span, bool) { - lineage := getLineage(ctx) - if len(lineage) == 0 { - return nopSpan{}, false - } - - return lineage[len(lineage)-1], true -} - -// WithSpan sets the active trace Span on the context. -func WithSpan(parent context.Context, span Span) context.Context { - lineage := getLineage(parent) - if len(lineage) == 0 { - return context.WithValue(parent, spanLineageKey{}, []Span{span}) - } - - lineage = append(lineage, span) - return context.WithValue(parent, spanLineageKey{}, lineage) -} - -// PopSpan pops the current Span off the context, setting the active Span on -// the returned Context back to its parent and returning the REMOVED one. -// -// PopSpan on a context with no active Span will return a no-op instance. -// -// This is mostly necessary for the runtime to manage base trace spans due to -// the wrapped-function nature of the middleware stack. End-users of Smithy -// clients SHOULD NOT generally be using this API. -func PopSpan(parent context.Context) (context.Context, Span) { - lineage := getLineage(parent) - if len(lineage) == 0 { - return parent, nopSpan{} - } - - span := lineage[len(lineage)-1] - lineage = lineage[:len(lineage)-1] - return context.WithValue(parent, spanLineageKey{}, lineage), span -} - -func getLineage(ctx context.Context) []Span { - v := ctx.Value(spanLineageKey{}) - if v == nil { - return nil - } - - return v.([]Span) -} - -// GetOperationTracer returns the embedded operation-scoped Tracer on a -// Context. -// -// The boolean in the return indicates whether a Tracer was actually in the -// context, but a no-op implementation will be returned if not, so callers -// can generally disregard the boolean unless they wish to explicitly confirm -// presence/absence of a Tracer. -func GetOperationTracer(ctx context.Context) (Tracer, bool) { - v := ctx.Value(operationTracerKey{}) - if v == nil { - return nopTracer{}, false - } - - return v.(Tracer), true -} - -// WithOperationTracer returns a child Context embedding the given Tracer. -// -// The runtime will use this embed a scoped tracer for client operations, -// Smithy/SDK client callers DO NOT need to do this explicitly. -func WithOperationTracer(parent context.Context, tracer Tracer) context.Context { - return context.WithValue(parent, operationTracerKey{}, tracer) -} - -// StartSpan is a convenience API for creating tracing Spans from a Context. -// -// StartSpan uses the operation-scoped Tracer, previously stored using -// [WithOperationTracer], to start the Span. If a Tracer has not been embedded -// the returned Span will be a no-op implementation. -func StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { - tracer, _ := GetOperationTracer(ctx) - return tracer.StartSpan(ctx, name, opts...) -} diff --git a/vendor/github.com/aws/smithy-go/tracing/nop.go b/vendor/github.com/aws/smithy-go/tracing/nop.go deleted file mode 100644 index 573d28b1c1bd..000000000000 --- a/vendor/github.com/aws/smithy-go/tracing/nop.go +++ /dev/null @@ -1,32 +0,0 @@ -package tracing - -import "context" - -// NopTracerProvider is a no-op tracing implementation. -type NopTracerProvider struct{} - -var _ TracerProvider = (*NopTracerProvider)(nil) - -// Tracer returns a tracer which creates no-op spans. -func (NopTracerProvider) Tracer(string, ...TracerOption) Tracer { - return nopTracer{} -} - -type nopTracer struct{} - -var _ Tracer = (*nopTracer)(nil) - -func (nopTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { - return ctx, nopSpan{} -} - -type nopSpan struct{} - -var _ Span = (*nopSpan)(nil) - -func (nopSpan) Name() string { return "" } -func (nopSpan) Context() SpanContext { return SpanContext{} } -func (nopSpan) AddEvent(string, ...EventOption) {} -func (nopSpan) SetProperty(any, any) {} -func (nopSpan) SetStatus(SpanStatus) {} -func (nopSpan) End() {} diff --git a/vendor/github.com/aws/smithy-go/tracing/tracing.go b/vendor/github.com/aws/smithy-go/tracing/tracing.go deleted file mode 100644 index 089ed3932e2d..000000000000 --- a/vendor/github.com/aws/smithy-go/tracing/tracing.go +++ /dev/null @@ -1,95 +0,0 @@ -// Package tracing defines tracing APIs to be used by Smithy clients. -package tracing - -import ( - "context" - - "github.com/aws/smithy-go" -) - -// SpanStatus records the "success" state of an observed span. -type SpanStatus int - -// Enumeration of SpanStatus. -const ( - SpanStatusUnset SpanStatus = iota - SpanStatusOK - SpanStatusError -) - -// SpanKind indicates the nature of the work being performed. -type SpanKind int - -// Enumeration of SpanKind. -const ( - SpanKindInternal SpanKind = iota - SpanKindClient - SpanKindServer - SpanKindProducer - SpanKindConsumer -) - -// TracerProvider is the entry point for creating client traces. -type TracerProvider interface { - Tracer(scope string, opts ...TracerOption) Tracer -} - -// TracerOption applies configuration to a tracer. -type TracerOption func(o *TracerOptions) - -// TracerOptions represent configuration for tracers. -type TracerOptions struct { - Properties smithy.Properties -} - -// Tracer is the entry point for creating observed client Spans. -// -// Spans created by tracers propagate by existing on the Context. Consumers of -// the API can use [GetSpan] to pull the active Span from a Context. -// -// Creation of child Spans is implicit through Context persistence. If -// CreateSpan is called with a Context that holds a Span, the result will be a -// child of that Span. -type Tracer interface { - StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) -} - -// SpanOption applies configuration to a span. -type SpanOption func(o *SpanOptions) - -// SpanOptions represent configuration for span events. -type SpanOptions struct { - Kind SpanKind - Properties smithy.Properties -} - -// Span records a conceptually individual unit of work that takes place in a -// Smithy client operation. -type Span interface { - Name() string - Context() SpanContext - AddEvent(name string, opts ...EventOption) - SetStatus(status SpanStatus) - SetProperty(k, v any) - End() -} - -// EventOption applies configuration to a span event. -type EventOption func(o *EventOptions) - -// EventOptions represent configuration for span events. -type EventOptions struct { - Properties smithy.Properties -} - -// SpanContext uniquely identifies a Span. -type SpanContext struct { - TraceID string - SpanID string - IsRemote bool -} - -// IsValid is true when a span has nonzero trace and span IDs. -func (ctx *SpanContext) IsValid() bool { - return len(ctx.TraceID) != 0 && len(ctx.SpanID) != 0 -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth.go b/vendor/github.com/aws/smithy-go/transport/http/auth.go deleted file mode 100644 index 58e1ab5ef876..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/auth.go +++ /dev/null @@ -1,21 +0,0 @@ -package http - -import ( - "context" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// AuthScheme defines an HTTP authentication scheme. -type AuthScheme interface { - SchemeID() string - IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver - Signer() Signer -} - -// Signer defines the interface through which HTTP requests are supplemented -// with an Identity. -type Signer interface { - SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go deleted file mode 100644 index d60cf2a60fd6..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go +++ /dev/null @@ -1,45 +0,0 @@ -package http - -import ( - "context" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// NewAnonymousScheme returns the anonymous HTTP auth scheme. -func NewAnonymousScheme() AuthScheme { - return &authScheme{ - schemeID: auth.SchemeIDAnonymous, - signer: &nopSigner{}, - } -} - -// authScheme is parameterized to generically implement the exported AuthScheme -// interface -type authScheme struct { - schemeID string - signer Signer -} - -var _ AuthScheme = (*authScheme)(nil) - -func (s *authScheme) SchemeID() string { - return s.schemeID -} - -func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { - return o.GetIdentityResolver(s.schemeID) -} - -func (s *authScheme) Signer() Signer { - return s.signer -} - -type nopSigner struct{} - -var _ Signer = (*nopSigner)(nil) - -func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error { - return nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go deleted file mode 100644 index bc4ad6e79739..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go +++ /dev/null @@ -1,70 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -const contentMD5Header = "Content-Md5" - -// contentMD5Checksum provides a middleware to compute and set -// content-md5 checksum for a http request -type contentMD5Checksum struct { -} - -// AddContentChecksumMiddleware adds checksum middleware to middleware's -// build step. -func AddContentChecksumMiddleware(stack *middleware.Stack) error { - // This middleware must be executed before request body is set. - return stack.Build.Add(&contentMD5Checksum{}, middleware.Before) -} - -// ID returns the identifier for the checksum middleware -func (m *contentMD5Checksum) ID() string { return "ContentChecksum" } - -// HandleBuild adds behavior to compute md5 checksum and add content-md5 header -// on http request -func (m *contentMD5Checksum) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // if Content-MD5 header is already present, return - if v := req.Header.Get(contentMD5Header); len(v) != 0 { - return next.HandleBuild(ctx, in) - } - - // fetch the request stream. - stream := req.GetStream() - // compute checksum if payload is explicit - if stream != nil { - if !req.IsStreamSeekable() { - return out, metadata, fmt.Errorf( - "unseekable stream is not supported for computing md5 checksum") - } - - v, err := computeMD5Checksum(stream) - if err != nil { - return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err) - } - - // reset the request stream - if err := req.RewindStream(); err != nil { - return out, metadata, fmt.Errorf( - "error rewinding request stream after computing md5 checksum, %w", err) - } - - // set the 'Content-MD5' header - req.Header.Set(contentMD5Header, string(v)) - } - - // set md5 header value - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go deleted file mode 100644 index 0fceae81dbab..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/client.go +++ /dev/null @@ -1,161 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" -) - -// ClientDo provides the interface for custom HTTP client implementations. -type ClientDo interface { - Do(*http.Request) (*http.Response, error) -} - -// ClientDoFunc provides a helper to wrap a function as an HTTP client for -// round tripping requests. -type ClientDoFunc func(*http.Request) (*http.Response, error) - -// Do will invoke the underlying func, returning the result. -func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) { - return fn(r) -} - -// ClientHandler wraps a client that implements the HTTP Do method. Standard -// implementation is http.Client. -type ClientHandler struct { - client ClientDo - - Meter metrics.Meter // For HTTP client metrics. -} - -// NewClientHandler returns an initialized middleware handler for the client. -// -// Deprecated: Use [NewClientHandlerWithOptions]. -func NewClientHandler(client ClientDo) ClientHandler { - return NewClientHandlerWithOptions(client) -} - -// NewClientHandlerWithOptions returns an initialized middleware handler for the client -// with applied options. -func NewClientHandlerWithOptions(client ClientDo, opts ...func(*ClientHandler)) ClientHandler { - h := ClientHandler{ - client: client, - } - for _, opt := range opts { - opt(&h) - } - if h.Meter == nil { - h.Meter = metrics.NopMeterProvider{}.Meter("") - } - return h -} - -// Handle implements the middleware Handler interface, that will invoke the -// underlying HTTP client. Requires the input to be a Smithy *Request. Returns -// a smithy *Response, or error if the request failed. -func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( - out interface{}, metadata middleware.Metadata, err error, -) { - ctx, span := tracing.StartSpan(ctx, "DoHTTPRequest") - defer span.End() - - ctx, client, err := withMetrics(ctx, c.client, c.Meter) - if err != nil { - return nil, metadata, fmt.Errorf("instrument with HTTP metrics: %w", err) - } - - req, ok := input.(*Request) - if !ok { - return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input) - } - - builtRequest := req.Build(ctx) - if err := ValidateEndpointHost(builtRequest.Host); err != nil { - return nil, metadata, err - } - - span.SetProperty("http.method", req.Method) - span.SetProperty("http.request_content_length", -1) // at least indicate unknown - length, ok, err := req.StreamLength() - if err != nil { - return nil, metadata, err - } - if ok { - span.SetProperty("http.request_content_length", length) - } - - resp, err := client.Do(builtRequest) - if resp == nil { - // Ensure a http response value is always present to prevent unexpected - // panics. - resp = &http.Response{ - Header: http.Header{}, - Body: http.NoBody, - } - } - if err != nil { - err = &RequestSendError{Err: err} - - // Override the error with a context canceled error, if that was canceled. - select { - case <-ctx.Done(): - err = &smithy.CanceledError{Err: ctx.Err()} - default: - } - } - - // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner. - // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the - // stream reference so that it can be safely reused. - if builtRequest.Body != nil { - _ = builtRequest.Body.Close() - } - - span.SetProperty("net.protocol.version", fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor)) - span.SetProperty("http.status_code", resp.StatusCode) - span.SetProperty("http.response_content_length", resp.ContentLength) - - return &Response{Response: resp}, metadata, err -} - -// RequestSendError provides a generic request transport error. This error -// should wrap errors making HTTP client requests. -// -// The ClientHandler will wrap the HTTP client's error if the client request -// fails, and did not fail because of context canceled. -type RequestSendError struct { - Err error -} - -// ConnectionError returns that the error is related to not being able to send -// the request, or receive a response from the service. -func (e *RequestSendError) ConnectionError() bool { - return true -} - -// Unwrap returns the underlying error, if there was one. -func (e *RequestSendError) Unwrap() error { - return e.Err -} - -func (e *RequestSendError) Error() string { - return fmt.Sprintf("request send failed, %v", e.Err) -} - -// NopClient provides a client that ignores the request, and returns an empty -// successful HTTP response value. -type NopClient struct{} - -// Do ignores the request and returns a 200 status empty response. -func (NopClient) Do(r *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: 200, - Header: http.Header{}, - Body: http.NoBody, - }, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go deleted file mode 100644 index 07366ac85a88..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package http provides the HTTP transport client and request/response types -needed to round trip API operation calls with an service. -*/ -package http diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go deleted file mode 100644 index cbc9deb4df07..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go +++ /dev/null @@ -1,163 +0,0 @@ -package http - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) { - values := make([]string, 0, len(vs)) - - for i := 0; i < len(vs); i++ { - parts, err := splitFn(vs[i]) - if err != nil { - return nil, err - } - values = append(values, parts...) - } - - return values, nil -} - -// SplitHeaderListValues attempts to split the elements of the slice by commas, -// and return a list of all values separated. Returns error if unable to -// separate the values. -func SplitHeaderListValues(vs []string) ([]string, error) { - return splitHeaderListValues(vs, quotedCommaSplit) -} - -func quotedCommaSplit(v string) (parts []string, err error) { - v = strings.TrimSpace(v) - - expectMore := true - for i := 0; i < len(v); i++ { - if unicode.IsSpace(rune(v[i])) { - continue - } - expectMore = false - - // leading space in part is ignored. - // Start of value must be non-space, or quote. - // - // - If quote, enter quoted mode, find next non-escaped quote to - // terminate the value. - // - Otherwise, find next comma to terminate value. - - remaining := v[i:] - - var value string - var valueLen int - if remaining[0] == '"' { - //------------------------------ - // Quoted value - //------------------------------ - var j int - var skipQuote bool - for j += 1; j < len(remaining); j++ { - if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) { - skipQuote = !skipQuote - continue - } - if remaining[j] == '"' { - break - } - } - if j == len(remaining) || j == 1 { - return nil, fmt.Errorf("value %v missing closing double quote", - remaining) - } - valueLen = j + 1 - - tail := remaining[valueLen:] - var k int - for ; k < len(tail); k++ { - if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' { - return nil, fmt.Errorf("value %v has non-space trailing characters", - remaining) - } - if tail[k] == ',' { - expectMore = true - break - } - } - value = remaining[:valueLen] - value, err = strconv.Unquote(value) - if err != nil { - return nil, fmt.Errorf("failed to unquote value %v, %w", value, err) - } - - // Pad valueLen to include trailing space(s) so `i` is updated correctly. - valueLen += k - - } else { - //------------------------------ - // Unquoted value - //------------------------------ - - // Index of the next comma is the length of the value, or end of string. - valueLen = strings.Index(remaining, ",") - if valueLen != -1 { - expectMore = true - } else { - valueLen = len(remaining) - } - value = strings.TrimSpace(remaining[:valueLen]) - } - - i += valueLen - parts = append(parts, value) - - } - - if expectMore { - parts = append(parts, "") - } - - return parts, nil -} - -// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date -// timestamp values in the slice by commas, and return a list of all values -// separated. The split is aware of the HTTP-Date timestamp format, and will skip -// comma within the timestamp value. Returns an error if unable to split the -// timestamp values. -func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) { - return splitHeaderListValues(vs, splitHTTPDateHeaderValue) -} - -func splitHTTPDateHeaderValue(v string) ([]string, error) { - if n := strings.Count(v, ","); n <= 1 { - // Nothing to do if only contains a no, or single HTTPDate value - return []string{v}, nil - } else if n%2 == 0 { - return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v) - } - - var parts []string - var i, j int - - var doSplit bool - for ; i < len(v); i++ { - if v[i] == ',' { - if doSplit { - doSplit = false - parts = append(parts, strings.TrimSpace(v[j:i])) - j = i + 1 - } else { - // Skip the first comma in the timestamp value since that - // separates the day from the rest of the timestamp. - // - // Tue, 17 Dec 2019 23:48:18 GMT - doSplit = true - } - } - } - // Add final part - if j < len(v) { - parts = append(parts, strings.TrimSpace(v[j:])) - } - - return parts, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go deleted file mode 100644 index db9801bea522..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/host.go +++ /dev/null @@ -1,89 +0,0 @@ -package http - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(host string) error { - var errors strings.Builder - var hostname string - var port string - var err error - - if strings.Contains(host, ":") { - hostname, port, err = net.SplitHostPort(host) - if err != nil { - errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host)) - errors.WriteString(err.Error()) - } - - if !ValidPortNumber(port) { - errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port)) - } - } else { - hostname = host - } - - labels := strings.Split(hostname, ".") - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ") - errors.WriteString(label) - } - } - - if len(hostname) == 0 && len(port) != 0 { - errors.WriteString("\nendpoint host with port must not be empty") - } - - if len(hostname) > 255 { - errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname))) - } - - if len(errors.String()) > 0 { - return fmt.Errorf("invalid endpoint host%s", errors.String()) - } - return nil -} - -// ValidPortNumber returns whether the port is valid RFC 3986 port. -func ValidPortNumber(port string) bool { - i, err := strconv.Atoi(port) - if err != nil { - return false - } - - if i < 0 || i > 65535 { - return false - } - return true -} - -// ValidHostLabel returns whether the label is a valid RFC 3986 host label. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go deleted file mode 100644 index e21f2632a6e3..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go +++ /dev/null @@ -1,321 +0,0 @@ -package http - -import ( - "context" -) - -func icopy[T any](v []T) []T { - s := make([]T, len(v)) - copy(s, v) - return s -} - -// InterceptorContext is all the information available in different -// interceptors. -// -// Not all information is available in each interceptor, see each interface -// definition for more details. -type InterceptorContext struct { - Input any - Request *Request - - Output any - Response *Response -} - -// InterceptorRegistry holds a list of operation interceptors. -// -// Interceptors allow callers to insert custom behavior at well-defined points -// within a client's operation lifecycle. -// -// # Interceptor context -// -// All interceptors are invoked with a context object that contains input and -// output containers for the operation. The individual fields that are -// available will depend on what the interceptor is and, in certain -// interceptors, how far the operation was able to progress. See the -// documentation for each interface definition for more information about field -// availability. -// -// Implementations MUST NOT directly mutate the values of the fields in the -// interceptor context. They are free to mutate the existing values _pointed -// to_ by those fields, however. -// -// # Returning errors -// -// All interceptors can return errors. If an interceptor returns an error -// _before_ the client's retry loop, the operation will fail immediately. If -// one returns an error _within_ the retry loop, the error WILL be considered -// according to the client's retry policy. -// -// # Adding interceptors -// -// Idiomatically you will simply use one of the Add() receiver methods to -// register interceptors as desired. However, the list for each interface is -// exported on the registry struct and the caller is free to manipulate it -// directly, for example, to register a number of interceptors all at once, or -// to remove one that was previously registered. -// -// The base SDK client WILL NOT add any interceptors. SDK operations and -// customizations are implemented in terms of middleware. -// -// Modifications to the registry will not persist across operation calls when -// using per-operation functional options. This means you can register -// interceptors on a per-operation basis without affecting other operations. -type InterceptorRegistry struct { - BeforeExecution []BeforeExecutionInterceptor - BeforeSerialization []BeforeSerializationInterceptor - AfterSerialization []AfterSerializationInterceptor - BeforeRetryLoop []BeforeRetryLoopInterceptor - BeforeAttempt []BeforeAttemptInterceptor - BeforeSigning []BeforeSigningInterceptor - AfterSigning []AfterSigningInterceptor - BeforeTransmit []BeforeTransmitInterceptor - AfterTransmit []AfterTransmitInterceptor - BeforeDeserialization []BeforeDeserializationInterceptor - AfterDeserialization []AfterDeserializationInterceptor - AfterAttempt []AfterAttemptInterceptor - AfterExecution []AfterExecutionInterceptor -} - -// Copy returns a deep copy of the registry. This is used by SDK clients on -// each operation call in order to prevent per-op config mutation from -// persisting. -func (i *InterceptorRegistry) Copy() InterceptorRegistry { - return InterceptorRegistry{ - BeforeExecution: icopy(i.BeforeExecution), - BeforeSerialization: icopy(i.BeforeSerialization), - AfterSerialization: icopy(i.AfterSerialization), - BeforeRetryLoop: icopy(i.BeforeRetryLoop), - BeforeAttempt: icopy(i.BeforeAttempt), - BeforeSigning: icopy(i.BeforeSigning), - AfterSigning: icopy(i.AfterSigning), - BeforeTransmit: icopy(i.BeforeTransmit), - AfterTransmit: icopy(i.AfterTransmit), - BeforeDeserialization: icopy(i.BeforeDeserialization), - AfterDeserialization: icopy(i.AfterDeserialization), - AfterAttempt: icopy(i.AfterAttempt), - AfterExecution: icopy(i.AfterExecution), - } -} - -// AddBeforeExecution registers the provided BeforeExecutionInterceptor. -func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) { - i.BeforeExecution = append(i.BeforeExecution, v) -} - -// AddBeforeSerialization registers the provided BeforeSerializationInterceptor. -func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) { - i.BeforeSerialization = append(i.BeforeSerialization, v) -} - -// AddAfterSerialization registers the provided AfterSerializationInterceptor. -func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) { - i.AfterSerialization = append(i.AfterSerialization, v) -} - -// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor. -func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) { - i.BeforeRetryLoop = append(i.BeforeRetryLoop, v) -} - -// AddBeforeAttempt registers the provided BeforeAttemptInterceptor. -func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) { - i.BeforeAttempt = append(i.BeforeAttempt, v) -} - -// AddBeforeSigning registers the provided BeforeSigningInterceptor. -func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) { - i.BeforeSigning = append(i.BeforeSigning, v) -} - -// AddAfterSigning registers the provided AfterSigningInterceptor. -func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) { - i.AfterSigning = append(i.AfterSigning, v) -} - -// AddBeforeTransmit registers the provided BeforeTransmitInterceptor. -func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) { - i.BeforeTransmit = append(i.BeforeTransmit, v) -} - -// AddAfterTransmit registers the provided AfterTransmitInterceptor. -func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) { - i.AfterTransmit = append(i.AfterTransmit, v) -} - -// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor. -func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) { - i.BeforeDeserialization = append(i.BeforeDeserialization, v) -} - -// AddAfterDeserialization registers the provided AfterDeserializationInterceptor. -func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) { - i.AfterDeserialization = append(i.AfterDeserialization, v) -} - -// AddAfterAttempt registers the provided AfterAttemptInterceptor. -func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) { - i.AfterAttempt = append(i.AfterAttempt, v) -} - -// AddAfterExecution registers the provided AfterExecutionInterceptor. -func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) { - i.AfterExecution = append(i.AfterExecution, v) -} - -// BeforeExecutionInterceptor runs before anything else in the operation -// lifecycle. -// -// Available InterceptorContext fields: -// - Input -type BeforeExecutionInterceptor interface { - BeforeExecution(ctx context.Context, in *InterceptorContext) error -} - -// BeforeSerializationInterceptor runs before the operation input is serialized -// into its transport request. -// -// Serialization occurs before the operation's retry loop. -// -// Available InterceptorContext fields: -// - Input -type BeforeSerializationInterceptor interface { - BeforeSerialization(ctx context.Context, in *InterceptorContext) error -} - -// AfterSerializationInterceptor runs after the operation input is serialized -// into its transport request. -// -// Available InterceptorContext fields: -// - Input -// - Request -type AfterSerializationInterceptor interface { - AfterSerialization(ctx context.Context, in *InterceptorContext) error -} - -// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop. -// -// Available InterceptorContext fields: -// - Input -// - Request -type BeforeRetryLoopInterceptor interface { - BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error -} - -// BeforeAttemptInterceptor runs right before every attempt in the retry loop. -// -// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be -// invoked. -// -// Available InterceptorContext fields: -// - Input -// - Request -type BeforeAttemptInterceptor interface { - BeforeAttempt(ctx context.Context, in *InterceptorContext) error -} - -// BeforeSigningInterceptor runs right before the request is signed. -// -// Signing occurs within the operation's retry loop. -// -// Available InterceptorContext fields: -// - Input -// - Request -type BeforeSigningInterceptor interface { - BeforeSigning(ctx context.Context, in *InterceptorContext) error -} - -// AfterSigningInterceptor runs right after the request is signed. -// -// It is unsafe to modify the outgoing HTTP request at or past this hook, since -// doing so may invalidate the signature of the request. -// -// Available InterceptorContext fields: -// - Input -// - Request -type AfterSigningInterceptor interface { - AfterSigning(ctx context.Context, in *InterceptorContext) error -} - -// BeforeTransmitInterceptor runs right before the HTTP request is sent. -// -// HTTP transmit occurs within the operation's retry loop. -// -// Available InterceptorContext fields: -// - Input -// - Request -type BeforeTransmitInterceptor interface { - BeforeTransmit(ctx context.Context, in *InterceptorContext) error -} - -// AfterTransmitInterceptor runs right after the HTTP response is received. -// -// It will always be invoked when a response is received, regardless of its -// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was -// not successful, e.g. because of a DNS resolution error -// -// Available InterceptorContext fields: -// - Input -// - Request -// - Response -type AfterTransmitInterceptor interface { - AfterTransmit(ctx context.Context, in *InterceptorContext) error -} - -// BeforeDeserializationInterceptor runs right before the incoming HTTP response -// is deserialized. -// -// This interceptor IS NOT invoked if the HTTP round-trip was not successful. -// -// Deserialization occurs within the operation's retry loop. -// -// Available InterceptorContext fields: -// - Input -// - Request -// - Response -type BeforeDeserializationInterceptor interface { - BeforeDeserialization(ctx context.Context, in *InterceptorContext) error -} - -// AfterDeserializationInterceptor runs right after the incoming HTTP response -// is deserialized. This hook is invoked regardless of whether the deserialized -// result was an error. -// -// This interceptor IS NOT invoked if the HTTP round-trip was not successful. -// -// Available InterceptorContext fields: -// - Input -// - Output (IF the operation had a success-level response) -// - Request -// - Response -type AfterDeserializationInterceptor interface { - AfterDeserialization(ctx context.Context, in *InterceptorContext) error -} - -// AfterAttemptInterceptor runs right after the incoming HTTP response -// is deserialized. This hook is invoked regardless of whether the deserialized -// result was an error, or if another interceptor within the retry loop -// returned an error. -// -// Available InterceptorContext fields: -// - Input -// - Output (IF the operation had a success-level response) -// - Request (IF the operation did not return an error during serialization) -// - Response (IF the operation was able to transmit the HTTP request) -type AfterAttemptInterceptor interface { - AfterAttempt(ctx context.Context, in *InterceptorContext) error -} - -// AfterExecutionInterceptor runs after everything else. It runs regardless of -// how far the operation progressed in its lifecycle, and regardless of whether -// the operation succeeded or failed. -// -// Available InterceptorContext fields: -// - Input -// - Output (IF the operation had a success-level response) -// - Request (IF the operation did not return an error during serialization) -// - Response (IF the operation was able to transmit the HTTP request) -type AfterExecutionInterceptor interface { - AfterExecution(ctx context.Context, in *InterceptorContext) error -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go deleted file mode 100644 index 2cc4b57f8947..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go +++ /dev/null @@ -1,325 +0,0 @@ -package http - -import ( - "context" - "errors" - - "github.com/aws/smithy-go/middleware" -) - -type ictxKey struct{} - -func withIctx(ctx context.Context) context.Context { - return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{}) -} - -func getIctx(ctx context.Context) *InterceptorContext { - return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext) -} - -// InterceptExecution runs Before/AfterExecutionInterceptors. -type InterceptExecution struct { - BeforeExecution []BeforeExecutionInterceptor - AfterExecution []AfterExecutionInterceptor -} - -// ID identifies the middleware. -func (m *InterceptExecution) ID() string { - return "InterceptExecution" -} - -// HandleInitialize runs the interceptors. -func (m *InterceptExecution) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, md middleware.Metadata, err error, -) { - ctx = withIctx(ctx) - getIctx(ctx).Input = in.Parameters - - for _, i := range m.BeforeExecution { - if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - out, md, err = next.HandleInitialize(ctx, in) - - for _, i := range m.AfterExecution { - if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return out, md, err -} - -// InterceptBeforeSerialization runs BeforeSerializationInterceptors. -type InterceptBeforeSerialization struct { - Interceptors []BeforeSerializationInterceptor -} - -// ID identifies the middleware. -func (m *InterceptBeforeSerialization) ID() string { - return "InterceptBeforeSerialization" -} - -// HandleSerialize runs the interceptors. -func (m *InterceptBeforeSerialization) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, md middleware.Metadata, err error, -) { - for _, i := range m.Interceptors { - if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return next.HandleSerialize(ctx, in) -} - -// InterceptAfterSerialization runs AfterSerializationInterceptors. -type InterceptAfterSerialization struct { - Interceptors []AfterSerializationInterceptor -} - -// ID identifies the middleware. -func (m *InterceptAfterSerialization) ID() string { - return "InterceptAfterSerialization" -} - -// HandleSerialize runs the interceptors. -func (m *InterceptAfterSerialization) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, md middleware.Metadata, err error, -) { - getIctx(ctx).Request = in.Request.(*Request) - - for _, i := range m.Interceptors { - if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return next.HandleSerialize(ctx, in) -} - -// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors. -type InterceptBeforeRetryLoop struct { - Interceptors []BeforeRetryLoopInterceptor -} - -// ID identifies the middleware. -func (m *InterceptBeforeRetryLoop) ID() string { - return "InterceptBeforeRetryLoop" -} - -// HandleFinalize runs the interceptors. -func (m *InterceptBeforeRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, md middleware.Metadata, err error, -) { - for _, i := range m.Interceptors { - if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return next.HandleFinalize(ctx, in) -} - -// InterceptBeforeSigning runs BeforeSigningInterceptors. -type InterceptBeforeSigning struct { - Interceptors []BeforeSigningInterceptor -} - -// ID identifies the middleware. -func (m *InterceptBeforeSigning) ID() string { - return "InterceptBeforeSigning" -} - -// HandleFinalize runs the interceptors. -func (m *InterceptBeforeSigning) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, md middleware.Metadata, err error, -) { - for _, i := range m.Interceptors { - if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return next.HandleFinalize(ctx, in) -} - -// InterceptAfterSigning runs AfterSigningInterceptors. -type InterceptAfterSigning struct { - Interceptors []AfterSigningInterceptor -} - -// ID identifies the middleware. -func (m *InterceptAfterSigning) ID() string { - return "InterceptAfterSigning" -} - -// HandleFinalize runs the interceptors. -func (m *InterceptAfterSigning) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, md middleware.Metadata, err error, -) { - for _, i := range m.Interceptors { - if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return next.HandleFinalize(ctx, in) -} - -// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors. -type InterceptTransmit struct { - BeforeTransmit []BeforeTransmitInterceptor - AfterTransmit []AfterTransmitInterceptor -} - -// ID identifies the middleware. -func (m *InterceptTransmit) ID() string { - return "InterceptTransmit" -} - -// HandleDeserialize runs the interceptors. -func (m *InterceptTransmit) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, md middleware.Metadata, err error, -) { - for _, i := range m.BeforeTransmit { - if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - out, md, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, md, err - } - - // the root of the decorated middleware guarantees this will be here - // (client.go: ClientHandler.Handle) - getIctx(ctx).Response = out.RawResponse.(*Response) - - for _, i := range m.AfterTransmit { - if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return out, md, err -} - -// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors. -type InterceptBeforeDeserialization struct { - Interceptors []BeforeDeserializationInterceptor -} - -// ID identifies the middleware. -func (m *InterceptBeforeDeserialization) ID() string { - return "InterceptBeforeDeserialization" -} - -// HandleDeserialize runs the interceptors. -func (m *InterceptBeforeDeserialization) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, md middleware.Metadata, err error, -) { - out, md, err = next.HandleDeserialize(ctx, in) - if err != nil { - var terr *RequestSendError - if errors.As(err, &terr) { - return out, md, err - } - } - - for _, i := range m.Interceptors { - if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return out, md, err -} - -// InterceptAfterDeserialization runs AfterDeserializationInterceptors. -type InterceptAfterDeserialization struct { - Interceptors []AfterDeserializationInterceptor -} - -// ID identifies the middleware. -func (m *InterceptAfterDeserialization) ID() string { - return "InterceptAfterDeserialization" -} - -// HandleDeserialize runs the interceptors. -func (m *InterceptAfterDeserialization) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, md middleware.Metadata, err error, -) { - out, md, err = next.HandleDeserialize(ctx, in) - if err != nil { - var terr *RequestSendError - if errors.As(err, &terr) { - return out, md, err - } - } - - getIctx(ctx).Output = out.Result - - for _, i := range m.Interceptors { - if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return out, md, err -} - -// InterceptAttempt runs AfterAttemptInterceptors. -type InterceptAttempt struct { - BeforeAttempt []BeforeAttemptInterceptor - AfterAttempt []AfterAttemptInterceptor -} - -// ID identifies the middleware. -func (m *InterceptAttempt) ID() string { - return "InterceptAttempt" -} - -// HandleFinalize runs the interceptors. -func (m *InterceptAttempt) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, md middleware.Metadata, err error, -) { - for _, i := range m.BeforeAttempt { - if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - out, md, err = next.HandleFinalize(ctx, in) - - for _, i := range m.AfterAttempt { - if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil { - return out, md, err - } - } - - return out, md, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go deleted file mode 100644 index 941a8d6b5123..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go +++ /dev/null @@ -1,75 +0,0 @@ -package io - -import ( - "io" - "sync" -) - -// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser. -func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser { - sr := &safeReadCloser{ - readCloser: readCloser, - } - - if _, ok := readCloser.(io.WriterTo); ok { - return &safeWriteToReadCloser{safeReadCloser: sr} - } - - return sr -} - -// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic -// if the underlying io.ReadClose does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this -// type. -type safeWriteToReadCloser struct { - *safeReadCloser -} - -// WriteTo implements the io.WriteTo interface. -func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) { - r.safeReadCloser.mtx.Lock() - defer r.safeReadCloser.mtx.Unlock() - - if r.safeReadCloser.closed { - return 0, io.EOF - } - - return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w) -} - -// safeReadCloser wraps a io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser -// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type -// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime -// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported. -// This type is thread-safe. -type safeReadCloser struct { - readCloser io.ReadCloser - closed bool - mtx sync.Mutex -} - -// Read reads up to len(p) bytes into p from the underlying read. If the reader is closed io.EOF will be returned. -func (r *safeReadCloser) Read(p []byte) (n int, err error) { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.closed { - return 0, io.EOF - } - - return r.readCloser.Read(p) -} - -// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error -// reported from Close. Subsequent calls to Close will always return a nil error. -func (r *safeReadCloser) Close() error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.closed { - return nil - } - - r.closed = true - rc := r.readCloser - r.readCloser = nil - return rc.Close() -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go deleted file mode 100644 index 5d6a4b23a27a..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go +++ /dev/null @@ -1,25 +0,0 @@ -package http - -import ( - "crypto/md5" - "encoding/base64" - "fmt" - "io" -) - -// computeMD5Checksum computes base64 md5 checksum of an io.Reader's contents. -// Returns the byte slice of md5 checksum and an error. -func computeMD5Checksum(r io.Reader) ([]byte, error) { - h := md5.New() - // copy errors may be assumed to be from the body. - _, err := io.Copy(h, r) - if err != nil { - return nil, fmt.Errorf("failed to read body: %w", err) - } - - // encode the md5 checksum in base64. - sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - return sum64, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go deleted file mode 100644 index d1beaa595d97..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/metrics.go +++ /dev/null @@ -1,198 +0,0 @@ -package http - -import ( - "context" - "crypto/tls" - "net/http" - "net/http/httptrace" - "sync/atomic" - "time" - - "github.com/aws/smithy-go/metrics" -) - -var now = time.Now - -// withMetrics instruments an HTTP client and context to collect HTTP metrics. -func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) ( - context.Context, ClientDo, error, -) { - hm, err := newHTTPMetrics(meter) - if err != nil { - return nil, nil, err - } - - ctx := httptrace.WithClientTrace(parent, &httptrace.ClientTrace{ - DNSStart: hm.DNSStart, - ConnectStart: hm.ConnectStart, - TLSHandshakeStart: hm.TLSHandshakeStart, - - GotConn: hm.GotConn(parent), - PutIdleConn: hm.PutIdleConn(parent), - ConnectDone: hm.ConnectDone(parent), - DNSDone: hm.DNSDone(parent), - TLSHandshakeDone: hm.TLSHandshakeDone(parent), - GotFirstResponseByte: hm.GotFirstResponseByte(parent), - }) - return ctx, &timedClientDo{client, hm}, nil -} - -type timedClientDo struct { - ClientDo - hm *httpMetrics -} - -func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) { - c.hm.doStart.Store(now()) - resp, err := c.ClientDo.Do(r) - - c.hm.DoRequestDuration.Record(r.Context(), c.hm.doStart.Elapsed()) - return resp, err -} - -type httpMetrics struct { - DNSLookupDuration metrics.Float64Histogram // client.http.connections.dns_lookup_duration - ConnectDuration metrics.Float64Histogram // client.http.connections.acquire_duration - TLSHandshakeDuration metrics.Float64Histogram // client.http.connections.tls_handshake_duration - ConnectionUsage metrics.Int64UpDownCounter // client.http.connections.usage - - DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration - TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte - - doStart safeTime - dnsStart safeTime - connectStart safeTime - tlsStart safeTime -} - -func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { - hm := &httpMetrics{} - - var err error - hm.DNSLookupDuration, err = meter.Float64Histogram("client.http.connections.dns_lookup_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes a request to perform DNS lookup." - }) - if err != nil { - return nil, err - } - hm.ConnectDuration, err = meter.Float64Histogram("client.http.connections.acquire_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes a request to acquire a connection." - }) - if err != nil { - return nil, err - } - hm.TLSHandshakeDuration, err = meter.Float64Histogram("client.http.connections.tls_handshake_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes an HTTP request to perform the TLS handshake." - }) - if err != nil { - return nil, err - } - hm.ConnectionUsage, err = meter.Int64UpDownCounter("client.http.connections.usage", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "{connection}" - o.Description = "Current state of connections pool." - }) - if err != nil { - return nil, err - } - hm.DoRequestDuration, err = meter.Float64Histogram("client.http.do_request_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "Time spent performing an entire HTTP transaction." - }) - if err != nil { - return nil, err - } - hm.TimeToFirstByte, err = meter.Float64Histogram("client.http.time_to_first_byte", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "Time from start of transaction to when the first response byte is available." - }) - if err != nil { - return nil, err - } - - return hm, nil -} - -func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) { - m.dnsStart.Store(now()) -} - -func (m *httpMetrics) ConnectStart(string, string) { - m.connectStart.Store(now()) -} - -func (m *httpMetrics) TLSHandshakeStart() { - m.tlsStart.Store(now()) -} - -func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) { - return func(httptrace.GotConnInfo) { - m.addConnAcquired(ctx, 1) - } -} - -func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) { - return func(error) { - m.addConnAcquired(ctx, -1) - } -} - -func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) { - return func(httptrace.DNSDoneInfo) { - m.DNSLookupDuration.Record(ctx, m.dnsStart.Elapsed()) - } -} - -func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) { - return func(string, string, error) { - m.ConnectDuration.Record(ctx, m.connectStart.Elapsed()) - } -} - -func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) { - return func(tls.ConnectionState, error) { - m.TLSHandshakeDuration.Record(ctx, m.tlsStart.Elapsed()) - } -} - -func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() { - return func() { - m.TimeToFirstByte.Record(ctx, m.doStart.Elapsed()) - } -} - -func (m *httpMetrics) addConnAcquired(ctx context.Context, incr int64) { - m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("state", "acquired") - }) -} - -// Not used: it is recommended to track acquired vs idle conn, but we can't -// determine when something is truly idle with the current HTTP client hooks -// available to us. -func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) { - m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("state", "idle") - }) -} - -type safeTime struct { - atomic.Value // time.Time -} - -func (st *safeTime) Store(v time.Time) { - st.Value.Store(v) -} - -func (st *safeTime) Load() time.Time { - t, _ := st.Value.Load().(time.Time) - return t -} - -func (st *safeTime) Elapsed() float64 { - end := now() - elapsed := end.Sub(st.Load()) - return float64(elapsed) / 1e9 -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go deleted file mode 100644 index 914338f2e75f..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go +++ /dev/null @@ -1,79 +0,0 @@ -package http - -import ( - "context" - "io" - - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically -// close the response body of an operation request if the request response -// failed. -func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before) -} - -type errorCloseResponseBodyMiddleware struct{} - -func (*errorCloseResponseBodyMiddleware) ID() string { - return "ErrorCloseResponseBody" -} - -func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err := next.HandleDeserialize(ctx, input) - if err != nil { - if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { - // Consume the full body to prevent TCP connection resets on some platforms - _, _ = io.Copy(io.Discard, resp.Body) - // Do not validate that the response closes successfully. - resp.Body.Close() - } - } - - return out, metadata, err -} - -// AddCloseResponseBodyMiddleware adds the middleware to automatically close -// the response body of an operation request, after the response had been -// deserialized. -func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before) -} - -type closeResponseBody struct{} - -func (*closeResponseBody) ID() string { - return "CloseResponseBody" -} - -func (m *closeResponseBody) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err := next.HandleDeserialize(ctx, input) - if err != nil { - return out, metadata, err - } - - if resp, ok := out.RawResponse.(*Response); ok { - // Consume the full body to prevent TCP connection resets on some platforms - _, copyErr := io.Copy(io.Discard, resp.Body) - if copyErr != nil { - middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse") - } - - closeErr := resp.Body.Close() - if closeErr != nil { - middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse") - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go deleted file mode 100644 index 9969389bb29d..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go +++ /dev/null @@ -1,84 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -// ComputeContentLength provides a middleware to set the content-length -// header for the length of a serialize request body. -type ComputeContentLength struct { -} - -// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware -// stack's Build step. -func AddComputeContentLengthMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&ComputeContentLength{}, middleware.After) -} - -// ID returns the identifier for the ComputeContentLength. -func (m *ComputeContentLength) ID() string { return "ComputeContentLength" } - -// HandleBuild adds the length of the serialized request to the HTTP header -// if the length can be determined. -func (m *ComputeContentLength) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // do nothing if request content-length was set to 0 or above. - if req.ContentLength >= 0 { - return next.HandleBuild(ctx, in) - } - - // attempt to compute stream length - if n, ok, err := req.StreamLength(); err != nil { - return out, metadata, fmt.Errorf( - "failed getting length of request stream, %w", err) - } else if ok { - req.ContentLength = n - } - - return next.HandleBuild(ctx, in) -} - -// validateContentLength provides a middleware to validate the content-length -// is valid (greater than zero), for the serialized request payload. -type validateContentLength struct{} - -// ValidateContentLengthHeader adds middleware that validates request content-length -// is set to value greater than zero. -func ValidateContentLengthHeader(stack *middleware.Stack) error { - return stack.Build.Add(&validateContentLength{}, middleware.After) -} - -// ID returns the identifier for the ComputeContentLength. -func (m *validateContentLength) ID() string { return "ValidateContentLength" } - -// HandleBuild adds the length of the serialized request to the HTTP header -// if the length can be determined. -func (m *validateContentLength) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // if request content-length was set to less than 0, return an error - if req.ContentLength < 0 { - return out, metadata, fmt.Errorf( - "content length for payload is required and must be at least 0") - } - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go deleted file mode 100644 index 855c22720315..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go +++ /dev/null @@ -1,81 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - - "github.com/aws/smithy-go/middleware" -) - -// WithHeaderComment instruments a middleware stack to append an HTTP field -// comment to the given header as specified in RFC 9110 -// (https://www.rfc-editor.org/rfc/rfc9110#name-comments). -// -// The header is case-insensitive. If the provided header exists when the -// middleware runs, the content will be inserted as-is enclosed in parentheses. -// -// Note that per the HTTP specification, comments are only allowed in fields -// containing "comment" as part of their field value definition, but this API -// will NOT verify whether the provided header is one of them. -// -// WithHeaderComment MAY be applied more than once to a middleware stack and/or -// more than once per header. -func WithHeaderComment(header, content string) func(*middleware.Stack) error { - return func(s *middleware.Stack) error { - m, err := getOrAddHeaderComment(s) - if err != nil { - return fmt.Errorf("get or add header comment: %v", err) - } - - m.values.Add(header, content) - return nil - } -} - -type headerCommentMiddleware struct { - values http.Header // hijack case-insensitive access APIs -} - -func (*headerCommentMiddleware) ID() string { - return "headerComment" -} - -func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - r, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - for h, contents := range m.values { - for _, c := range contents { - if existing := r.Header.Get(h); existing != "" { - r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c)) - } - } - } - - return next.HandleBuild(ctx, in) -} - -func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) { - id := (*headerCommentMiddleware)(nil).ID() - m, ok := s.Build.Get(id) - if !ok { - m := &headerCommentMiddleware{values: http.Header{}} - if err := s.Build.Add(m, middleware.After); err != nil { - return nil, fmt.Errorf("add build: %v", err) - } - - return m, nil - } - - hc, ok := m.(*headerCommentMiddleware) - if !ok { - return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id) - } - - return hc, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go deleted file mode 100644 index eac32b4babdf..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go +++ /dev/null @@ -1,167 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -type isContentTypeAutoSet struct{} - -// SetIsContentTypeDefaultValue returns a Context specifying if the request's -// content-type header was set to a default value. -func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context { - return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault) -} - -// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the -// request is a default value that was auto assigned by an operation -// serializer. Allows middleware post serialization to know if the content-type -// was auto set to a default value or not. -// -// Also returns false if the Context value was never updated to include if -// content-type was set to a default value. -func GetIsContentTypeDefaultValue(ctx context.Context) bool { - v, _ := ctx.Value(isContentTypeAutoSet{}).(bool) - return v -} - -// AddNoPayloadDefaultContentTypeRemover Adds the DefaultContentTypeRemover -// middleware to the stack after the operation serializer. This middleware will -// remove the content-type header from the request if it was set as a default -// value, and no request payload is present. -// -// Returns error if unable to add the middleware. -func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { - err = stack.Serialize.Insert(removeDefaultContentType{}, - "OperationSerializer", middleware.After) - if err != nil { - return fmt.Errorf("failed to add %s serialize middleware, %w", - removeDefaultContentType{}.ID(), err) - } - - return nil -} - -// RemoveNoPayloadDefaultContentTypeRemover removes the -// DefaultContentTypeRemover middleware from the stack. Returns an error if -// unable to remove the middleware. -func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { - _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID()) - if err != nil { - return fmt.Errorf("failed to remove %s serialize middleware, %w", - removeDefaultContentType{}.ID(), err) - - } - return nil -} - -// removeDefaultContentType provides after serialization middleware that will -// remove the content-type header from an HTTP request if the header was set as -// a default value by the operation serializer, and there is no request payload. -type removeDefaultContentType struct{} - -// ID returns the middleware ID -func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" } - -// HandleSerialize implements the serialization middleware. -func (removeDefaultContentType) HandleSerialize( - ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, meta middleware.Metadata, err error, -) { - req, ok := input.Request.(*Request) - if !ok { - return out, meta, fmt.Errorf( - "unexpected request type %T for removeDefaultContentType middleware", - input.Request) - } - - if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil { - req.Header.Del("Content-Type") - input.Request = req - } - - return next.HandleSerialize(ctx, input) -} - -type headerValue struct { - header string - value string - append bool -} - -type headerValueHelper struct { - headerValues []headerValue -} - -func (h *headerValueHelper) addHeaderValue(value headerValue) { - h.headerValues = append(h.headerValues, value) -} - -func (h *headerValueHelper) ID() string { - return "HTTPHeaderHelper" -} - -func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - for _, value := range h.headerValues { - if value.append { - req.Header.Add(value.header, value.value) - } else { - req.Header.Set(value.header, value.value) - } - } - - return next.HandleBuild(ctx, in) -} - -func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) { - id := (*headerValueHelper)(nil).ID() - m, ok := stack.Build.Get(id) - if !ok { - m = &headerValueHelper{} - err := stack.Build.Add(m, middleware.After) - if err != nil { - return nil, err - } - } - - requestUserAgent, ok := m.(*headerValueHelper) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id) - } - - return requestUserAgent, nil -} - -// AddHeaderValue returns a stack mutator that adds the header value pair to header. -// Appends to any existing values if present. -func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error { - return func(stack *middleware.Stack) error { - helper, err := getOrAddHeaderValueHelper(stack) - if err != nil { - return err - } - helper.addHeaderValue(headerValue{header: header, value: value, append: true}) - return nil - } -} - -// SetHeaderValue returns a stack mutator that adds the header value pair to header. -// Replaces any existing values if present. -func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error { - return func(stack *middleware.Stack) error { - helper, err := getOrAddHeaderValueHelper(stack) - if err != nil { - return err - } - helper.addHeaderValue(headerValue{header: header, value: value, append: false}) - return nil - } -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go deleted file mode 100644 index d5909b0a242a..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go +++ /dev/null @@ -1,75 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http/httputil" - - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally -// their respective bodies. Will not perform any logging if none of the options are set. -type RequestResponseLogger struct { - LogRequest bool - LogRequestWithBody bool - - LogResponse bool - LogResponseWithBody bool -} - -// ID is the middleware identifier. -func (r *RequestResponseLogger) ID() string { - return "RequestResponseLogger" -} - -// HandleDeserialize will log the request and response HTTP messages if configured accordingly. -func (r *RequestResponseLogger) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - logger := middleware.GetLogger(ctx) - - if r.LogRequest || r.LogRequestWithBody { - smithyRequest, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - rc := smithyRequest.Build(ctx) - reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) - if err != nil { - return out, metadata, err - } - - logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) - - if r.LogRequestWithBody { - smithyRequest, err = smithyRequest.SetStream(rc.Body) - if err != nil { - return out, metadata, err - } - in.Request = smithyRequest - } - } - - out, metadata, err = next.HandleDeserialize(ctx, in) - - if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { - smithyResponse, ok := out.RawResponse.(*Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) - } - - respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) - if err != nil { - return out, metadata, fmt.Errorf("failed to dump response %w", err) - } - - logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go deleted file mode 100644 index d6079b25950c..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go +++ /dev/null @@ -1,51 +0,0 @@ -package http - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -type ( - hostnameImmutableKey struct{} - hostPrefixDisableKey struct{} -) - -// GetHostnameImmutable retrieves whether the endpoint hostname should be considered -// immutable or not. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func GetHostnameImmutable(ctx context.Context) (v bool) { - v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool) - return v -} - -// SetHostnameImmutable sets or modifies whether the request's endpoint hostname -// should be considered immutable or not. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func SetHostnameImmutable(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value) -} - -// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is -// disabled. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) { - v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool) - return v -} - -// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host -// prefixing should be disabled. If value is true, endpoint host prefixing -// will be disabled. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go deleted file mode 100644 index 326cb8a6cab9..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go +++ /dev/null @@ -1,79 +0,0 @@ -package http - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - "strings" -) - -// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum -// HTTP protocol version. -type MinimumProtocolError struct { - proto string - expectedProtoMajor int - expectedProtoMinor int -} - -// Error returns the error message. -func (m *MinimumProtocolError) Error() string { - return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - m.expectedProtoMajor, m.expectedProtoMinor, m.proto) -} - -// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection -// meets the minimum major ad minor version. -type RequireMinimumProtocol struct { - ProtoMajor int - ProtoMinor int -} - -// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum -// protocol major and minor version. -func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error { - return stack.Deserialize.Insert(&RequireMinimumProtocol{ - ProtoMajor: major, - ProtoMinor: minor, - }, "OperationDeserializer", middleware.Before) -} - -// ID returns the middleware identifier string. -func (r *RequireMinimumProtocol) ID() string { - return "RequireMinimumProtocol" -} - -// HandleDeserialize asserts that the established connection is a HTTP connection with the minimum major and minor -// protocol version. -func (r *RequireMinimumProtocol) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) - } - - if !strings.HasPrefix(response.Proto, "HTTP") { - return out, metadata, &MinimumProtocolError{ - proto: response.Proto, - expectedProtoMajor: r.ProtoMajor, - expectedProtoMinor: r.ProtoMinor, - } - } - - if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor { - return out, metadata, &MinimumProtocolError{ - proto: response.Proto, - expectedProtoMajor: r.ProtoMajor, - expectedProtoMinor: r.ProtoMinor, - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/properties.go b/vendor/github.com/aws/smithy-go/transport/http/properties.go deleted file mode 100644 index c65aa3932015..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/properties.go +++ /dev/null @@ -1,80 +0,0 @@ -package http - -import smithy "github.com/aws/smithy-go" - -type ( - sigV4SigningNameKey struct{} - sigV4SigningRegionKey struct{} - - sigV4ASigningNameKey struct{} - sigV4ASigningRegionsKey struct{} - - isUnsignedPayloadKey struct{} - disableDoubleEncodingKey struct{} -) - -// GetSigV4SigningName gets the signing name from Properties. -func GetSigV4SigningName(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4SigningNameKey{}).(string) - return v, ok -} - -// SetSigV4SigningName sets the signing name on Properties. -func SetSigV4SigningName(p *smithy.Properties, name string) { - p.Set(sigV4SigningNameKey{}, name) -} - -// GetSigV4SigningRegion gets the signing region from Properties. -func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4SigningRegionKey{}).(string) - return v, ok -} - -// SetSigV4SigningRegion sets the signing region on Properties. -func SetSigV4SigningRegion(p *smithy.Properties, region string) { - p.Set(sigV4SigningRegionKey{}, region) -} - -// GetSigV4ASigningName gets the v4a signing name from Properties. -func GetSigV4ASigningName(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4ASigningNameKey{}).(string) - return v, ok -} - -// SetSigV4ASigningName sets the signing name on Properties. -func SetSigV4ASigningName(p *smithy.Properties, name string) { - p.Set(sigV4ASigningNameKey{}, name) -} - -// GetSigV4ASigningRegion gets the v4a signing region set from Properties. -func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) { - v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string) - return v, ok -} - -// SetSigV4ASigningRegions sets the v4a signing region set on Properties. -func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) { - p.Set(sigV4ASigningRegionsKey{}, regions) -} - -// GetIsUnsignedPayload gets whether the payload is unsigned from Properties. -func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) { - v, ok := p.Get(isUnsignedPayloadKey{}).(bool) - return v, ok -} - -// SetIsUnsignedPayload sets whether the payload is unsigned on Properties. -func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) { - p.Set(isUnsignedPayloadKey{}, isUnsignedPayload) -} - -// GetDisableDoubleEncoding gets whether the payload is unsigned from Properties. -func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) { - v, ok := p.Get(disableDoubleEncodingKey{}).(bool) - return v, ok -} - -// SetDisableDoubleEncoding sets whether the payload is unsigned on Properties. -func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) { - p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go deleted file mode 100644 index 5cbf6f10acc3..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/request.go +++ /dev/null @@ -1,188 +0,0 @@ -package http - -import ( - "context" - "fmt" - "io" - "net/http" - "net/url" - "strings" - - iointernal "github.com/aws/smithy-go/transport/http/internal/io" -) - -// Request provides the HTTP specific request structure for HTTP specific -// middleware steps to use to serialize input, and send an operation's request. -type Request struct { - *http.Request - stream io.Reader - isStreamSeekable bool - streamStartPos int64 -} - -// NewStackRequest returns an initialized request ready to be populated with the -// HTTP request details. Returns empty interface so the function can be used as -// a parameter to the Smithy middleware Stack constructor. -func NewStackRequest() interface{} { - return &Request{ - Request: &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - ContentLength: -1, // default to unknown length - }, - } -} - -// IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set. -func (r *Request) IsHTTPS() bool { - if r.URL == nil { - return false - } - return strings.EqualFold(r.URL.Scheme, "https") -} - -// Clone returns a deep copy of the Request for the new context. A reference to -// the Stream is copied, but the underlying stream is not copied. -func (r *Request) Clone() *Request { - rc := *r - rc.Request = rc.Request.Clone(context.TODO()) - return &rc -} - -// StreamLength returns the number of bytes of the serialized stream attached -// to the request and ok set. If the length cannot be determined, an error will -// be returned. -func (r *Request) StreamLength() (size int64, ok bool, err error) { - return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos) -} - -func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) { - if stream == nil { - return 0, true, nil - } - - if l, ok := stream.(interface{ Len() int }); ok { - return int64(l.Len()), true, nil - } - - if !seekable { - return 0, false, nil - } - - s := stream.(io.Seeker) - endOffset, err := s.Seek(0, io.SeekEnd) - if err != nil { - return 0, false, err - } - - // The reason to seek to streamStartPos instead of 0 is to ensure that the - // SDK only sends the stream from the starting position the user's - // application provided it to the SDK at. For example application opens a - // file, and wants to skip the first N bytes uploading the rest. The - // application would move the file's offset N bytes, then hand it off to - // the SDK to send the remaining. The SDK should respect that initial offset. - _, err = s.Seek(startPos, io.SeekStart) - if err != nil { - return 0, false, err - } - - return endOffset - startPos, true, nil -} - -// RewindStream will rewind the io.Reader to the relative start position if it -// is an io.Seeker. -func (r *Request) RewindStream() error { - // If there is no stream there is nothing to rewind. - if r.stream == nil { - return nil - } - - if !r.isStreamSeekable { - return fmt.Errorf("request stream is not seekable") - } - _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart) - return err -} - -// GetStream returns the request stream io.Reader if a stream is set. If no -// stream is present nil will be returned. -func (r *Request) GetStream() io.Reader { - return r.stream -} - -// IsStreamSeekable returns whether the stream is seekable. -func (r *Request) IsStreamSeekable() bool { - return r.isStreamSeekable -} - -// SetStream returns a clone of the request with the stream set to the provided -// reader. May return an error if the provided reader is seekable but returns -// an error. -func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) { - rc = r.Clone() - - if reader == http.NoBody { - reader = nil - } - - var isStreamSeekable bool - var streamStartPos int64 - switch v := reader.(type) { - case io.Seeker: - n, err := v.Seek(0, io.SeekCurrent) - if err != nil { - return r, err - } - isStreamSeekable = true - streamStartPos = n - default: - // If the stream length can be determined, and is determined to be empty, - // use a nil stream to prevent confusion between empty vs not-empty - // streams. - length, ok, err := streamLength(reader, false, 0) - if err != nil { - return nil, err - } else if ok && length == 0 { - reader = nil - } - } - - rc.stream = reader - rc.isStreamSeekable = isStreamSeekable - rc.streamStartPos = streamStartPos - - return rc, err -} - -// Build returns a build standard HTTP request value from the Smithy request. -// The request's stream is wrapped in a safe container that allows it to be -// reused for subsequent attempts. -func (r *Request) Build(ctx context.Context) *http.Request { - req := r.Request.Clone(ctx) - - if r.stream == nil && req.ContentLength == -1 { - req.ContentLength = 0 - } - - switch stream := r.stream.(type) { - case *io.PipeReader: - req.Body = io.NopCloser(stream) - req.ContentLength = -1 - default: - // HTTP Client Request must only have a non-nil body if the - // ContentLength is explicitly unknown (-1) or non-zero. The HTTP - // Client will interpret a non-nil body and ContentLength 0 as - // "unknown". This is unwanted behavior. - if req.ContentLength != 0 && r.stream != nil { - req.Body = iointernal.NewSafeReadCloser(io.NopCloser(stream)) - } - } - - return req -} - -// RequestCloner is a function that can take an input request type and clone the request -// for use in a subsequent retry attempt. -func RequestCloner(v interface{}) interface{} { - return v.(*Request).Clone() -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go deleted file mode 100644 index 0c13bfcc8e2c..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/response.go +++ /dev/null @@ -1,34 +0,0 @@ -package http - -import ( - "fmt" - "net/http" -) - -// Response provides the HTTP specific response structure for HTTP specific -// middleware steps to use to deserialize the response from an operation call. -type Response struct { - *http.Response -} - -// ResponseError provides the HTTP centric error type wrapping the underlying -// error with the HTTP response value. -type ResponseError struct { - Response *Response - Err error -} - -// HTTPStatusCode returns the HTTP response status code received from the service. -func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode } - -// HTTPResponse returns the HTTP response received from the service. -func (e *ResponseError) HTTPResponse() *Response { return e.Response } - -// Unwrap returns the nested error if any, or nil. -func (e *ResponseError) Unwrap() error { return e.Err } - -func (e *ResponseError) Error() string { - return fmt.Sprintf( - "http response error StatusCode: %d, %v", - e.Response.StatusCode, e.Err) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go deleted file mode 100644 index 607b196a8bdd..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/time.go +++ /dev/null @@ -1,13 +0,0 @@ -package http - -import ( - "time" - - smithytime "github.com/aws/smithy-go/time" -) - -// ParseTime parses a time string like the HTTP Date header. This uses a more -// relaxed rule set for date parsing compared to the standard library. -func ParseTime(text string) (t time.Time, err error) { - return smithytime.ParseHTTPDate(text) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go deleted file mode 100644 index 60a5fc1002a9..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/url.go +++ /dev/null @@ -1,44 +0,0 @@ -package http - -import "strings" - -// JoinPath returns an absolute URL path composed of the two paths provided. -// Enforces that the returned path begins with '/'. If added path is empty the -// returned path suffix will match the first parameter suffix. -func JoinPath(a, b string) string { - if len(a) == 0 { - a = "/" - } else if a[0] != '/' { - a = "/" + a - } - - if len(b) != 0 && b[0] == '/' { - b = b[1:] - } - - if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' { - a = a + "/" - } - - return a + b -} - -// JoinRawQuery returns an absolute raw query expression. Any duplicate '&' -// will be collapsed to single separator between values. -func JoinRawQuery(a, b string) string { - a = strings.TrimFunc(a, isAmpersand) - b = strings.TrimFunc(b, isAmpersand) - - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - - return a + "&" + b -} - -func isAmpersand(v rune) bool { - return v == '&' -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go deleted file mode 100644 index 71a7e0d8af55..000000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package http - -import ( - "strings" -) - -// UserAgentBuilder is a builder for a HTTP User-Agent string. -type UserAgentBuilder struct { - sb strings.Builder -} - -// NewUserAgentBuilder returns a new UserAgentBuilder. -func NewUserAgentBuilder() *UserAgentBuilder { - return &UserAgentBuilder{sb: strings.Builder{}} -} - -// AddKey adds the named component/product to the agent string -func (u *UserAgentBuilder) AddKey(key string) { - u.appendTo(key) -} - -// AddKeyValue adds the named key to the agent string with the given value. -func (u *UserAgentBuilder) AddKeyValue(key, value string) { - u.appendTo(key + "/" + value) -} - -// Build returns the constructed User-Agent string. May be called multiple times. -func (u *UserAgentBuilder) Build() string { - return u.sb.String() -} - -func (u *UserAgentBuilder) appendTo(value string) { - if u.sb.Len() > 0 { - u.sb.WriteRune(' ') - } - u.sb.WriteString(value) -} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go deleted file mode 100644 index b5eedc1f90ab..000000000000 --- a/vendor/github.com/aws/smithy-go/validation.go +++ /dev/null @@ -1,140 +0,0 @@ -package smithy - -import ( - "bytes" - "fmt" - "strings" -) - -// An InvalidParamsError provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type InvalidParamsError struct { - // Context is the base context of the invalid parameter group. - Context string - errs []InvalidParamError -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *InvalidParamsError) Add(err InvalidParamError) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another InvalidParamsError -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e *InvalidParamsError) Len() int { - return len(e.errs) -} - -// Error returns the string formatted form of the invalid parameters. -func (e InvalidParamsError) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Error()) - } - - return w.String() -} - -// Errs returns a slice of the invalid parameters -func (e InvalidParamsError) Errs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An InvalidParamError represents an invalid parameter error type. -type InvalidParamError interface { - error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type invalidParamError struct { - context string - nestedContext string - field string - reason string -} - -// Error returns the string version of the invalid parameter error. -func (e invalidParamError) Error() string { - return fmt.Sprintf("%s, %s.", e.reason, e.Field()) -} - -// Field Returns the field and context the error occurred. -func (e invalidParamError) Field() string { - sb := &strings.Builder{} - sb.WriteString(e.context) - if sb.Len() > 0 { - if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") { - sb.WriteRune('.') - } - } - if len(e.nestedContext) > 0 { - sb.WriteString(e.nestedContext) - sb.WriteRune('.') - } - sb.WriteString(e.field) - return sb.String() -} - -// SetContext updates the base context of the error. -func (e *invalidParamError) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *invalidParamError) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - return - } - // Check if our nested context is an index into a slice or map - if e.nestedContext[:1] != "[" { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - return - } - e.nestedContext = ctx + e.nestedContext -} - -// An ParamRequiredError represents an required parameter error. -type ParamRequiredError struct { - invalidParamError -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ParamRequiredError { - return &ParamRequiredError{ - invalidParamError{ - field: field, - reason: fmt.Sprintf("missing required field"), - }, - } -} diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go deleted file mode 100644 index 8d70a03ff2f4..000000000000 --- a/vendor/github.com/aws/smithy-go/waiter/logger.go +++ /dev/null @@ -1,36 +0,0 @@ -package waiter - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// Logger is the Logger middleware used by the waiter to log an attempt -type Logger struct { - // Attempt is the current attempt to be logged - Attempt int64 -} - -// ID representing the Logger middleware -func (*Logger) ID() string { - return "WaiterLogger" -} - -// HandleInitialize performs handling of request in initialize stack step -func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - logger := middleware.GetLogger(ctx) - - logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt)) - - return next.HandleInitialize(ctx, in) -} - -// AddLogger is a helper util to add waiter logger after `SetLogger` middleware in -func (m Logger) AddLogger(stack *middleware.Stack) error { - return stack.Initialize.Insert(&m, "SetLogger", middleware.After) -} diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go deleted file mode 100644 index 03e46e2ee72c..000000000000 --- a/vendor/github.com/aws/smithy-go/waiter/waiter.go +++ /dev/null @@ -1,66 +0,0 @@ -package waiter - -import ( - "fmt" - "math" - "time" - - "github.com/aws/smithy-go/rand" -) - -// ComputeDelay computes delay between waiter attempts. The function takes in a current attempt count, -// minimum delay, maximum delay, and remaining wait time for waiter as input. The inputs minDelay and maxDelay -// must always be greater than 0, along with minDelay lesser than or equal to maxDelay. -// -// Returns the computed delay and if next attempt count is possible within the given input time constraints. -// Note that the zeroth attempt results in no delay. -func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) { - // zeroth attempt, no delay - if attempt <= 0 { - return 0, nil - } - - // remainingTime is zero or less, no delay - if remainingTime <= 0 { - return 0, nil - } - - // validate min delay is greater than 0 - if minDelay == 0 { - return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay") - } - - // validate max delay is greater than 0 - if maxDelay == 0 { - return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay") - } - - // Get attempt ceiling to prevent integer overflow. - attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1 - - if attempt > int64(attemptCeiling) { - delay = maxDelay - } else { - // Compute exponential delay based on attempt. - ri := 1 << uint64(attempt-1) - // compute delay - delay = minDelay * time.Duration(ri) - } - - if delay != minDelay { - // randomize to get jitter between min delay and delay value - d, err := rand.CryptoRandInt63n(int64(delay - minDelay)) - if err != nil { - return 0, fmt.Errorf("error computing retry jitter, %w", err) - } - - delay = time.Duration(d) + minDelay - } - - // check if this is the last attempt possible and compute delay accordingly - if remainingTime-delay <= minDelay { - delay = remainingTime - minDelay - } - - return delay, nil -} diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml new file mode 100644 index 000000000000..ac12e485a156 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go_import_path: github.com/dustin/go-humanize +go: + - 1.13.x + - 1.14.x + - 1.15.x + - 1.16.x + - stable + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - diff -u <(echo -n) <(gofmt -d -s .) + - go vet . + - go install -v -race ./... + - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE new file mode 100644 index 000000000000..8d9a94a90680 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown new file mode 100644 index 000000000000..7d0b16b34f5a --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -0,0 +1,124 @@ +# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) + +Just a few functions for helping humanize times and sizes. + +`go get` it as `github.com/dustin/go-humanize`, import it as +`"github.com/dustin/go-humanize"`, use it as `humanize`. + +See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for +complete documentation. + +## Sizes + +This lets you take numbers like `82854982` and convert them to useful +strings like, `83 MB` or `79 MiB` (whichever you prefer). + +Example: + +```go +fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. +``` + +## Times + +This lets you take a `time.Time` and spit it out in relative terms. +For example, `12 seconds ago` or `3 days from now`. + +Example: + +```go +fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. +``` + +Thanks to Kyle Lemons for the time implementation from an IRC +conversation one day. It's pretty neat. + +## Ordinals + +From a [mailing list discussion][odisc] where a user wanted to be able +to label ordinals. + + 0 -> 0th + 1 -> 1st + 2 -> 2nd + 3 -> 3rd + 4 -> 4th + [...] + +Example: + +```go +fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. +``` + +## Commas + +Want to shove commas into numbers? Be my guest. + + 0 -> 0 + 100 -> 100 + 1000 -> 1,000 + 1000000000 -> 1,000,000,000 + -100000 -> -100,000 + +Example: + +```go +fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. +``` + +## Ftoa + +Nicer float64 formatter that removes trailing zeros. + +```go +fmt.Printf("%f", 2.24) // 2.240000 +fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 +fmt.Printf("%f", 2.0) // 2.000000 +fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 +``` + +## SI notation + +Format numbers with [SI notation][sinotation]. + +Example: + +```go +humanize.SI(0.00000000223, "M") // 2.23 nM +``` + +## English-specific functions + +The following functions are in the `humanize/english` subpackage. + +### Plurals + +Simple English pluralization + +```go +english.PluralWord(1, "object", "") // object +english.PluralWord(42, "object", "") // objects +english.PluralWord(2, "bus", "") // buses +english.PluralWord(99, "locus", "loci") // loci + +english.Plural(1, "object", "") // 1 object +english.Plural(42, "object", "") // 42 objects +english.Plural(2, "bus", "") // 2 buses +english.Plural(99, "locus", "loci") // 99 loci +``` + +### Word series + +Format comma-separated words lists with conjuctions: + +```go +english.WordSeries([]string{"foo"}, "and") // foo +english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar +english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz + +english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz +``` + +[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion +[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go new file mode 100644 index 000000000000..f49dc337dcd7 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/big.go @@ -0,0 +1,31 @@ +package humanize + +import ( + "math/big" +) + +// order of magnitude (to a max order) +func oomm(n, b *big.Int, maxmag int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + if mag == maxmag && maxmag >= 0 { + break + } + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} + +// total order of magnitude +// (same as above, but with no upper limit) +func oom(n, b *big.Int) (float64, int) { + mag := 0 + m := &big.Int{} + for n.Cmp(b) >= 0 { + n.DivMod(n, b, m) + mag++ + } + return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag +} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go new file mode 100644 index 000000000000..3b015fd59ecd --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -0,0 +1,189 @@ +package humanize + +import ( + "fmt" + "math/big" + "strings" + "unicode" +) + +var ( + bigIECExp = big.NewInt(1024) + + // BigByte is one byte in bit.Ints + BigByte = big.NewInt(1) + // BigKiByte is 1,024 bytes in bit.Ints + BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) + // BigMiByte is 1,024 k bytes in bit.Ints + BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) + // BigGiByte is 1,024 m bytes in bit.Ints + BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) + // BigTiByte is 1,024 g bytes in bit.Ints + BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) + // BigPiByte is 1,024 t bytes in bit.Ints + BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) + // BigEiByte is 1,024 p bytes in bit.Ints + BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) + // BigZiByte is 1,024 e bytes in bit.Ints + BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) + // BigYiByte is 1,024 z bytes in bit.Ints + BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) + // BigRiByte is 1,024 y bytes in bit.Ints + BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp) + // BigQiByte is 1,024 r bytes in bit.Ints + BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp) +) + +var ( + bigSIExp = big.NewInt(1000) + + // BigSIByte is one SI byte in big.Ints + BigSIByte = big.NewInt(1) + // BigKByte is 1,000 SI bytes in big.Ints + BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) + // BigMByte is 1,000 SI k bytes in big.Ints + BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) + // BigGByte is 1,000 SI m bytes in big.Ints + BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) + // BigTByte is 1,000 SI g bytes in big.Ints + BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) + // BigPByte is 1,000 SI t bytes in big.Ints + BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) + // BigEByte is 1,000 SI p bytes in big.Ints + BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) + // BigZByte is 1,000 SI e bytes in big.Ints + BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) + // BigYByte is 1,000 SI z bytes in big.Ints + BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) + // BigRByte is 1,000 SI y bytes in big.Ints + BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp) + // BigQByte is 1,000 SI r bytes in big.Ints + BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp) +) + +var bigBytesSizeTable = map[string]*big.Int{ + "b": BigByte, + "kib": BigKiByte, + "kb": BigKByte, + "mib": BigMiByte, + "mb": BigMByte, + "gib": BigGiByte, + "gb": BigGByte, + "tib": BigTiByte, + "tb": BigTByte, + "pib": BigPiByte, + "pb": BigPByte, + "eib": BigEiByte, + "eb": BigEByte, + "zib": BigZiByte, + "zb": BigZByte, + "yib": BigYiByte, + "yb": BigYByte, + "rib": BigRiByte, + "rb": BigRByte, + "qib": BigQiByte, + "qb": BigQByte, + // Without suffix + "": BigByte, + "ki": BigKiByte, + "k": BigKByte, + "mi": BigMiByte, + "m": BigMByte, + "gi": BigGiByte, + "g": BigGByte, + "ti": BigTiByte, + "t": BigTByte, + "pi": BigPiByte, + "p": BigPByte, + "ei": BigEiByte, + "e": BigEByte, + "z": BigZByte, + "zi": BigZiByte, + "y": BigYByte, + "yi": BigYiByte, + "r": BigRByte, + "ri": BigRiByte, + "q": BigQByte, + "qi": BigQiByte, +} + +var ten = big.NewInt(10) + +func humanateBigBytes(s, base *big.Int, sizes []string) string { + if s.Cmp(ten) < 0 { + return fmt.Sprintf("%d B", s) + } + c := (&big.Int{}).Set(s) + val, mag := oomm(c, base, len(sizes)-1) + suffix := sizes[mag] + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) + +} + +// BigBytes produces a human readable representation of an SI size. +// +// See also: ParseBigBytes. +// +// BigBytes(82854982) -> 83 MB +func BigBytes(s *big.Int) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"} + return humanateBigBytes(s, bigSIExp, sizes) +} + +// BigIBytes produces a human readable representation of an IEC size. +// +// See also: ParseBigBytes. +// +// BigIBytes(82854982) -> 79 MiB +func BigIBytes(s *big.Int) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"} + return humanateBigBytes(s, bigIECExp, sizes) +} + +// ParseBigBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See also: BigBytes, BigIBytes. +// +// ParseBigBytes("42 MB") -> 42000000, nil +// ParseBigBytes("42 mib") -> 44040192, nil +func ParseBigBytes(s string) (*big.Int, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' || r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + val := &big.Rat{} + _, err := fmt.Sscanf(num, "%f", val) + if err != nil { + return nil, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bigBytesSizeTable[extra]; ok { + mv := (&big.Rat{}).SetInt(m) + val.Mul(val, mv) + rv := &big.Int{} + rv.Div(val.Num(), val.Denom()) + return rv, nil + } + + return nil, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go new file mode 100644 index 000000000000..0b498f4885c5 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/bytes.go @@ -0,0 +1,143 @@ +package humanize + +import ( + "fmt" + "math" + "strconv" + "strings" + "unicode" +) + +// IEC Sizes. +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + PiByte + EiByte +) + +// SI Sizes. +const ( + IByte = 1 + KByte = IByte * 1000 + MByte = KByte * 1000 + GByte = MByte * 1000 + TByte = GByte * 1000 + PByte = TByte * 1000 + EByte = PByte * 1000 +) + +var bytesSizeTable = map[string]uint64{ + "b": Byte, + "kib": KiByte, + "kb": KByte, + "mib": MiByte, + "mb": MByte, + "gib": GiByte, + "gb": GByte, + "tib": TiByte, + "tb": TByte, + "pib": PiByte, + "pb": PByte, + "eib": EiByte, + "eb": EByte, + // Without suffix + "": Byte, + "ki": KiByte, + "k": KByte, + "mi": MiByte, + "m": MByte, + "gi": GiByte, + "g": GByte, + "ti": TiByte, + "t": TByte, + "pi": PiByte, + "p": PByte, + "ei": EiByte, + "e": EByte, +} + +func logn(n, b float64) float64 { + return math.Log(n) / math.Log(b) +} + +func humanateBytes(s uint64, base float64, sizes []string) string { + if s < 10 { + return fmt.Sprintf("%d B", s) + } + e := math.Floor(logn(float64(s), base)) + suffix := sizes[int(e)] + val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 + f := "%.0f %s" + if val < 10 { + f = "%.1f %s" + } + + return fmt.Sprintf(f, val, suffix) +} + +// Bytes produces a human readable representation of an SI size. +// +// See also: ParseBytes. +// +// Bytes(82854982) -> 83 MB +func Bytes(s uint64) string { + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} + return humanateBytes(s, 1000, sizes) +} + +// IBytes produces a human readable representation of an IEC size. +// +// See also: ParseBytes. +// +// IBytes(82854982) -> 79 MiB +func IBytes(s uint64) string { + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + return humanateBytes(s, 1024, sizes) +} + +// ParseBytes parses a string representation of bytes into the number +// of bytes it represents. +// +// See Also: Bytes, IBytes. +// +// ParseBytes("42 MB") -> 42000000, nil +// ParseBytes("42 mib") -> 44040192, nil +func ParseBytes(s string) (uint64, error) { + lastDigit := 0 + hasComma := false + for _, r := range s { + if !(unicode.IsDigit(r) || r == '.' || r == ',') { + break + } + if r == ',' { + hasComma = true + } + lastDigit++ + } + + num := s[:lastDigit] + if hasComma { + num = strings.Replace(num, ",", "", -1) + } + + f, err := strconv.ParseFloat(num, 64) + if err != nil { + return 0, err + } + + extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) + if m, ok := bytesSizeTable[extra]; ok { + f *= float64(m) + if f >= math.MaxUint64 { + return 0, fmt.Errorf("too large: %v", s) + } + return uint64(f), nil + } + + return 0, fmt.Errorf("unhandled size name: %v", extra) +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go new file mode 100644 index 000000000000..520ae3e57d92 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -0,0 +1,116 @@ +package humanize + +import ( + "bytes" + "math" + "math/big" + "strconv" + "strings" +) + +// Comma produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Comma(834142) -> 834,142 +func Comma(v int64) string { + sign := "" + + // Min int64 can't be negated to a usable value, so it has to be special cased. + if v == math.MinInt64 { + return "-9,223,372,036,854,775,808" + } + + if v < 0 { + sign = "-" + v = 0 - v + } + + parts := []string{"", "", "", "", "", "", ""} + j := len(parts) - 1 + + for v > 999 { + parts[j] = strconv.FormatInt(v%1000, 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + v = v / 1000 + j-- + } + parts[j] = strconv.Itoa(int(v)) + return sign + strings.Join(parts[j:], ",") +} + +// Commaf produces a string form of the given number in base 10 with +// commas after every three orders of magnitude. +// +// e.g. Commaf(834142.32) -> 834,142.32 +func Commaf(v float64) string { + buf := &bytes.Buffer{} + if v < 0 { + buf.Write([]byte{'-'}) + v = 0 - v + } + + comma := []byte{','} + + parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} + +// CommafWithDigits works like the Commaf but limits the resulting +// string to the given number of decimal places. +// +// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 +func CommafWithDigits(f float64, decimals int) string { + return stripTrailingDigits(Commaf(f), decimals) +} + +// BigComma produces a string form of the given big.Int in base 10 +// with commas after every three orders of magnitude. +func BigComma(b *big.Int) string { + sign := "" + if b.Sign() < 0 { + sign = "-" + b.Abs(b) + } + + athousand := big.NewInt(1000) + c := (&big.Int{}).Set(b) + _, m := oom(c, athousand) + parts := make([]string, m+1) + j := len(parts) - 1 + + mod := &big.Int{} + for b.Cmp(athousand) >= 0 { + b.DivMod(b, athousand, mod) + parts[j] = strconv.FormatInt(mod.Int64(), 10) + switch len(parts[j]) { + case 2: + parts[j] = "0" + parts[j] + case 1: + parts[j] = "00" + parts[j] + } + j-- + } + parts[j] = strconv.Itoa(int(b.Int64())) + return sign + strings.Join(parts[j:], ",") +} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go new file mode 100644 index 000000000000..2bc83a03cf80 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -0,0 +1,41 @@ +//go:build go1.6 +// +build go1.6 + +package humanize + +import ( + "bytes" + "math/big" + "strings" +) + +// BigCommaf produces a string form of the given big.Float in base 10 +// with commas after every three orders of magnitude. +func BigCommaf(v *big.Float) string { + buf := &bytes.Buffer{} + if v.Sign() < 0 { + buf.Write([]byte{'-'}) + v.Abs(v) + } + + comma := []byte{','} + + parts := strings.Split(v.Text('f', -1), ".") + pos := 0 + if len(parts[0])%3 != 0 { + pos += len(parts[0]) % 3 + buf.WriteString(parts[0][:pos]) + buf.Write(comma) + } + for ; pos < len(parts[0]); pos += 3 { + buf.WriteString(parts[0][pos : pos+3]) + buf.Write(comma) + } + buf.Truncate(buf.Len() - 1) + + if len(parts) > 1 { + buf.Write([]byte{'.'}) + buf.WriteString(parts[1]) + } + return buf.String() +} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go new file mode 100644 index 000000000000..bce923f371aa --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -0,0 +1,49 @@ +package humanize + +import ( + "strconv" + "strings" +) + +func stripTrailingZeros(s string) string { + if !strings.ContainsRune(s, '.') { + return s + } + offset := len(s) - 1 + for offset > 0 { + if s[offset] == '.' { + offset-- + break + } + if s[offset] != '0' { + break + } + offset-- + } + return s[:offset+1] +} + +func stripTrailingDigits(s string, digits int) string { + if i := strings.Index(s, "."); i >= 0 { + if digits <= 0 { + return s[:i] + } + i++ + if i+digits >= len(s) { + return s + } + return s[:i+digits] + } + return s +} + +// Ftoa converts a float to a string with no trailing zeros. +func Ftoa(num float64) string { + return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) +} + +// FtoaWithDigits converts a float to a string but limits the resulting string +// to the given number of decimal places, and no trailing zeros. +func FtoaWithDigits(num float64, digits int) string { + return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) +} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go new file mode 100644 index 000000000000..a2c2da31ef1a --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/humanize.go @@ -0,0 +1,8 @@ +/* +Package humanize converts boring ugly numbers to human-friendly strings and back. + +Durations can be turned into strings such as "3 days ago", numbers +representing sizes like 82854982 into useful strings like, "83 MB" or +"79 MiB" (whichever you prefer). +*/ +package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go new file mode 100644 index 000000000000..6470d0d47a8f --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -0,0 +1,192 @@ +package humanize + +/* +Slightly adapted from the source to fit go-humanize. + +Author: https://github.com/gorhill +Source: https://gist.github.com/gorhill/5285193 + +*/ + +import ( + "math" + "strconv" +) + +var ( + renderFloatPrecisionMultipliers = [...]float64{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + } + + renderFloatPrecisionRounders = [...]float64{ + 0.5, + 0.05, + 0.005, + 0.0005, + 0.00005, + 0.000005, + 0.0000005, + 0.00000005, + 0.000000005, + 0.0000000005, + } +) + +// FormatFloat produces a formatted number as string based on the following user-specified criteria: +// * thousands separator +// * decimal separator +// * decimal precision +// +// Usage: s := RenderFloat(format, n) +// The format parameter tells how to render the number n. +// +// See examples: http://play.golang.org/p/LXc1Ddm1lJ +// +// Examples of format strings, given n = 12345.6789: +// "#,###.##" => "12,345.67" +// "#,###." => "12,345" +// "#,###" => "12345,678" +// "#\u202F###,##" => "12 345,68" +// "#.###,###### => 12.345,678900 +// "" (aka default format) => 12,345.67 +// +// The highest precision allowed is 9 digits after the decimal symbol. +// There is also a version for integer number, FormatInteger(), +// which is convenient for calls within template. +func FormatFloat(format string, n float64) string { + // Special cases: + // NaN = "NaN" + // +Inf = "+Infinity" + // -Inf = "-Infinity" + if math.IsNaN(n) { + return "NaN" + } + if n > math.MaxFloat64 { + return "Infinity" + } + if n < (0.0 - math.MaxFloat64) { + return "-Infinity" + } + + // default format + precision := 2 + decimalStr := "." + thousandStr := "," + positiveStr := "" + negativeStr := "-" + + if len(format) > 0 { + format := []rune(format) + + // If there is an explicit format directive, + // then default values are these: + precision = 9 + thousandStr = "" + + // collect indices of meaningful formatting directives + formatIndx := []int{} + for i, char := range format { + if char != '#' && char != '0' { + formatIndx = append(formatIndx, i) + } + } + + if len(formatIndx) > 0 { + // Directive at index 0: + // Must be a '+' + // Raise an error if not the case + // index: 0123456789 + // +0.000,000 + // +000,000.0 + // +0000.00 + // +0000 + if formatIndx[0] == 0 { + if format[formatIndx[0]] != '+' { + panic("RenderFloat(): invalid positive sign directive") + } + positiveStr = "+" + formatIndx = formatIndx[1:] + } + + // Two directives: + // First is thousands separator + // Raise an error if not followed by 3-digit + // 0123456789 + // 0.000,000 + // 000,000.00 + if len(formatIndx) == 2 { + if (formatIndx[1] - formatIndx[0]) != 4 { + panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") + } + thousandStr = string(format[formatIndx[0]]) + formatIndx = formatIndx[1:] + } + + // One directive: + // Directive is decimal separator + // The number of digit-specifier following the separator indicates wanted precision + // 0123456789 + // 0.00 + // 000,0000 + if len(formatIndx) == 1 { + decimalStr = string(format[formatIndx[0]]) + precision = len(format) - formatIndx[0] - 1 + } + } + } + + // generate sign part + var signStr string + if n >= 0.000000001 { + signStr = positiveStr + } else if n <= -0.000000001 { + signStr = negativeStr + n = -n + } else { + signStr = "" + n = 0.0 + } + + // split number into integer and fractional parts + intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) + + // generate integer part string + intStr := strconv.FormatInt(int64(intf), 10) + + // add thousand separator if required + if len(thousandStr) > 0 { + for i := len(intStr); i > 3; { + i -= 3 + intStr = intStr[:i] + thousandStr + intStr[i:] + } + } + + // no fractional part, we can leave now + if precision == 0 { + return signStr + intStr + } + + // generate fractional part + fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) + // may need padding + if len(fracStr) < precision { + fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr + } + + return signStr + intStr + decimalStr + fracStr +} + +// FormatInteger produces a formatted number as string. +// See FormatFloat. +func FormatInteger(format string, n int) string { + return FormatFloat(format, float64(n)) +} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go new file mode 100644 index 000000000000..43d88a861950 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/ordinals.go @@ -0,0 +1,25 @@ +package humanize + +import "strconv" + +// Ordinal gives you the input number in a rank/ordinal format. +// +// Ordinal(3) -> 3rd +func Ordinal(x int) string { + suffix := "th" + switch x % 10 { + case 1: + if x%100 != 11 { + suffix = "st" + } + case 2: + if x%100 != 12 { + suffix = "nd" + } + case 3: + if x%100 != 13 { + suffix = "rd" + } + } + return strconv.Itoa(x) + suffix +} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go new file mode 100644 index 000000000000..8b85019849a3 --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -0,0 +1,127 @@ +package humanize + +import ( + "errors" + "math" + "regexp" + "strconv" +) + +var siPrefixTable = map[float64]string{ + -30: "q", // quecto + -27: "r", // ronto + -24: "y", // yocto + -21: "z", // zepto + -18: "a", // atto + -15: "f", // femto + -12: "p", // pico + -9: "n", // nano + -6: "µ", // micro + -3: "m", // milli + 0: "", + 3: "k", // kilo + 6: "M", // mega + 9: "G", // giga + 12: "T", // tera + 15: "P", // peta + 18: "E", // exa + 21: "Z", // zetta + 24: "Y", // yotta + 27: "R", // ronna + 30: "Q", // quetta +} + +var revSIPrefixTable = revfmap(siPrefixTable) + +// revfmap reverses the map and precomputes the power multiplier +func revfmap(in map[float64]string) map[string]float64 { + rv := map[string]float64{} + for k, v := range in { + rv[v] = math.Pow(10, k) + } + return rv +} + +var riParseRegex *regexp.Regexp + +func init() { + ri := `^([\-0-9.]+)\s?([` + for _, v := range siPrefixTable { + ri += v + } + ri += `]?)(.*)` + + riParseRegex = regexp.MustCompile(ri) +} + +// ComputeSI finds the most appropriate SI prefix for the given number +// and returns the prefix along with the value adjusted to be within +// that prefix. +// +// See also: SI, ParseSI. +// +// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") +func ComputeSI(input float64) (float64, string) { + if input == 0 { + return 0, "" + } + mag := math.Abs(input) + exponent := math.Floor(logn(mag, 10)) + exponent = math.Floor(exponent/3) * 3 + + value := mag / math.Pow(10, exponent) + + // Handle special case where value is exactly 1000.0 + // Should return 1 M instead of 1000 k + if value == 1000.0 { + exponent += 3 + value = mag / math.Pow(10, exponent) + } + + value = math.Copysign(value, input) + + prefix := siPrefixTable[exponent] + return value, prefix +} + +// SI returns a string with default formatting. +// +// SI uses Ftoa to format float value, removing trailing zeros. +// +// See also: ComputeSI, ParseSI. +// +// e.g. SI(1000000, "B") -> 1 MB +// e.g. SI(2.2345e-12, "F") -> 2.2345 pF +func SI(input float64, unit string) string { + value, prefix := ComputeSI(input) + return Ftoa(value) + " " + prefix + unit +} + +// SIWithDigits works like SI but limits the resulting string to the +// given number of decimal places. +// +// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB +// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF +func SIWithDigits(input float64, decimals int, unit string) string { + value, prefix := ComputeSI(input) + return FtoaWithDigits(value, decimals) + " " + prefix + unit +} + +var errInvalid = errors.New("invalid input") + +// ParseSI parses an SI string back into the number and unit. +// +// See also: SI, ComputeSI. +// +// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) +func ParseSI(input string) (float64, string, error) { + found := riParseRegex.FindStringSubmatch(input) + if len(found) != 4 { + return 0, "", errInvalid + } + mag := revSIPrefixTable[found[2]] + unit := found[3] + + base, err := strconv.ParseFloat(found[1], 64) + return base * mag, unit, err +} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go new file mode 100644 index 000000000000..dd3fbf5efc0c --- /dev/null +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -0,0 +1,117 @@ +package humanize + +import ( + "fmt" + "math" + "sort" + "time" +) + +// Seconds-based time units +const ( + Day = 24 * time.Hour + Week = 7 * Day + Month = 30 * Day + Year = 12 * Month + LongTime = 37 * Year +) + +// Time formats a time into a relative string. +// +// Time(someT) -> "3 weeks ago" +func Time(then time.Time) string { + return RelTime(then, time.Now(), "ago", "from now") +} + +// A RelTimeMagnitude struct contains a relative time point at which +// the relative format of time will switch to a new format string. A +// slice of these in ascending order by their "D" field is passed to +// CustomRelTime to format durations. +// +// The Format field is a string that may contain a "%s" which will be +// replaced with the appropriate signed label (e.g. "ago" or "from +// now") and a "%d" that will be replaced by the quantity. +// +// The DivBy field is the amount of time the time difference must be +// divided by in order to display correctly. +// +// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" +// DivBy should be time.Minute so whatever the duration is will be +// expressed in minutes. +type RelTimeMagnitude struct { + D time.Duration + Format string + DivBy time.Duration +} + +var defaultMagnitudes = []RelTimeMagnitude{ + {time.Second, "now", time.Second}, + {2 * time.Second, "1 second %s", 1}, + {time.Minute, "%d seconds %s", time.Second}, + {2 * time.Minute, "1 minute %s", 1}, + {time.Hour, "%d minutes %s", time.Minute}, + {2 * time.Hour, "1 hour %s", 1}, + {Day, "%d hours %s", time.Hour}, + {2 * Day, "1 day %s", 1}, + {Week, "%d days %s", Day}, + {2 * Week, "1 week %s", 1}, + {Month, "%d weeks %s", Week}, + {2 * Month, "1 month %s", 1}, + {Year, "%d months %s", Month}, + {18 * Month, "1 year %s", 1}, + {2 * Year, "2 years %s", 1}, + {LongTime, "%d years %s", Year}, + {math.MaxInt64, "a long while %s", 1}, +} + +// RelTime formats a time into a relative string. +// +// It takes two times and two labels. In addition to the generic time +// delta string (e.g. 5 minutes), the labels are used applied so that +// the label corresponding to the smaller time is applied. +// +// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" +func RelTime(a, b time.Time, albl, blbl string) string { + return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) +} + +// CustomRelTime formats a time into a relative string. +// +// It takes two times two labels and a table of relative time formats. +// In addition to the generic time delta string (e.g. 5 minutes), the +// labels are used applied so that the label corresponding to the +// smaller time is applied. +func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { + lbl := albl + diff := b.Sub(a) + + if a.After(b) { + lbl = blbl + diff = a.Sub(b) + } + + n := sort.Search(len(magnitudes), func(i int) bool { + return magnitudes[i].D > diff + }) + + if n >= len(magnitudes) { + n = len(magnitudes) - 1 + } + mag := magnitudes[n] + args := []interface{}{} + escaped := false + for _, ch := range mag.Format { + if escaped { + switch ch { + case 's': + args = append(args, lbl) + case 'd': + args = append(args, diff/mag.DivBy) + } + escaped = false + } else { + escaped = ch == '%' + } + } + return fmt.Sprintf(mag.Format, args...) +} diff --git a/vendor/github.com/go-ini/ini/.editorconfig b/vendor/github.com/go-ini/ini/.editorconfig new file mode 100644 index 000000000000..4a2d9180f96f --- /dev/null +++ b/vendor/github.com/go-ini/ini/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 000000000000..588388bda28d --- /dev/null +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,7 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode +.DS_Store diff --git a/vendor/github.com/go-ini/ini/.golangci.yml b/vendor/github.com/go-ini/ini/.golangci.yml new file mode 100644 index 000000000000..631e369254d3 --- /dev/null +++ b/vendor/github.com/go-ini/ini/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000000..d361bbcdf5c9 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 000000000000..f3b0dae2d298 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,15 @@ +.PHONY: build test bench vet coverage + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -test.bench=. -test.benchmem + +vet: + go vet + +coverage: + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 000000000000..30606d9700a8 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,43 @@ +# INI + +[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) +[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) +[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +## Features + +- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +The minimum requirement of Go is **1.13**. + +```sh +$ go get gopkg.in/ini.v1 +``` + +Please add `-u` flag to update in the future. + +## Getting Help + +- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- 中国大陆镜像:https://ini.unknwon.cn + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml new file mode 100644 index 000000000000..e02ec84bc05f --- /dev/null +++ b/vendor/github.com/go-ini/ini/codecov.yml @@ -0,0 +1,16 @@ +coverage: + range: "60...95" + status: + project: + default: + threshold: 1% + informational: true + patch: + defualt: + only_pulls: true + informational: true + +comment: + layout: 'diff' + +github_checks: false diff --git a/vendor/github.com/go-ini/ini/data_source.go b/vendor/github.com/go-ini/ini/data_source.go new file mode 100644 index 000000000000..c3a541f1d1b5 --- /dev/null +++ b/vendor/github.com/go-ini/ini/data_source.go @@ -0,0 +1,76 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" +) + +var ( + _ dataSource = (*sourceFile)(nil) + _ dataSource = (*sourceData)(nil) + _ dataSource = (*sourceReadCloser)(nil) +) + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + case io.Reader: + return &sourceReadCloser{ioutil.NopCloser(s)}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type %q", s) + } +} diff --git a/vendor/github.com/go-ini/ini/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go new file mode 100644 index 000000000000..48b8e66d6d6f --- /dev/null +++ b/vendor/github.com/go-ini/ini/deprecated.go @@ -0,0 +1,22 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +var ( + // Deprecated: Use "DefaultSection" instead. + DEFAULT_SECTION = DefaultSection + // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore = SnackCase +) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 000000000000..f66bc94b8b69 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,49 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. +type ErrDelimiterNotFound struct { + Line string +} + +// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} + +// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. +type ErrEmptyKeyName struct { + Line string +} + +// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. +func IsErrEmptyKeyName(err error) bool { + _, ok := err.(ErrEmptyKeyName) + return ok +} + +func (err ErrEmptyKeyName) Error() string { + return fmt.Sprintf("empty key name: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go new file mode 100644 index 000000000000..f8b22408be51 --- /dev/null +++ b/vendor/github.com/go-ini/ini/file.go @@ -0,0 +1,541 @@ +// Copyright 2017 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" +) + +// File represents a combination of one or more INI files in memory. +type File struct { + options LoadOptions + dataSources []dataSource + + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + lock sync.RWMutex + + // To keep data in order. + sectionList []string + // To keep track of the index of a section with same name. + // This meta list is only used with non-unique section names are allowed. + sectionIndexes []int + + // Actual data is stored here. + sections map[string][]*Section + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + if len(opts.KeyValueDelimiters) == 0 { + opts.KeyValueDelimiters = "=:" + } + if len(opts.KeyValueDelimiterOnWrite) == 0 { + opts.KeyValueDelimiterOnWrite = "=" + } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } + + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string][]*Section), + options: opts, + } +} + +// Empty returns an empty file object. +func Empty(opts ...LoadOptions) *File { + var opt LoadOptions + if len(opts) > 0 { + opt = opts[0] + } + + // Ignore error here, we are sure our data is good. + f, _ := LoadSources(opt, []byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("empty section name") + } + + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { + return f.sections[name][0], nil + } + + f.sectionList = append(f.sectionList, name) + + // NOTE: Append to indexes must happen before appending to sections, + // otherwise index will have off-by-one problem. + f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) + + sec := newSection(f, name) + f.sections[name] = append(f.sections[name], sec) + + return sec, nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + secs, err := f.SectionsByName(name) + if err != nil { + return nil, err + } + + return secs[0], err +} + +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + +// SectionsByName returns all sections with given name. +func (f *File) SectionsByName(name string) ([]*Section, error) { + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + secs := f.sections[name] + if len(secs) == 0 { + return nil, fmt.Errorf("section %q does not exist", name) + } + + return secs, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + if name == "" { + name = DefaultSection + } + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// SectionWithIndex assumes named section exists and returns a new section when not. +func (f *File) SectionWithIndex(name string, index int) *Section { + secs, err := f.SectionsByName(name) + if err != nil || len(secs) <= index { + // NOTE: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + newSec, _ := f.NewSection(name) + return newSec + } + + return secs[index] +} + +// Sections returns a list of Section stored in the current instance. +func (f *File) Sections() []*Section { + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sections := make([]*Section, len(f.sectionList)) + for i, name := range f.sectionList { + sections[i] = f.sections[name][f.sectionIndexes[i]] + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section or all sections with given name. +func (f *File) DeleteSection(name string) { + secs, err := f.SectionsByName(name) + if err != nil { + return + } + + for i := 0; i < len(secs); i++ { + // For non-unique sections, it is always needed to remove the first one so + // in the next iteration, the subsequent section continue having index 0. + // Ignoring the error as index 0 never returns an error. + _ = f.DeleteSectionWithIndex(name, 0) + } +} + +// DeleteSectionWithIndex deletes a section with given name and index. +func (f *File) DeleteSectionWithIndex(name string, index int) error { + if !f.options.AllowNonUniqueSections && index != 0 { + return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") + } + + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + // Count occurrences of the sections + occurrences := 0 + + sectionListCopy := make([]string, len(f.sectionList)) + copy(sectionListCopy, f.sectionList) + + for i, s := range sectionListCopy { + if s != name { + continue + } + + if occurrences == index { + if len(f.sections[name]) <= 1 { + delete(f.sections, name) // The last one in the map + } else { + f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) + } + + // Fix section lists + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) + + } else if occurrences > index { + // Fix the indices of all following sections with this name. + f.sectionIndexes[i-1]-- + } + + occurrences++ + } + + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + _ = f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + if f.options.ShortCircuit { + return nil + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 + for i, sname := range f.sectionList { + sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + isLastSection := i == lastSectionIdx + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modified if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KeyList: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + writeKeyValue := func(val string) (bool, error) { + if _, err := buf.WriteString(kname); err != nil { + return false, err + } + + if key.isBooleanType { + buf.WriteString(LineBreak) + return true, nil + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { + return nil, err + } + } + + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { + return nil, err + } else if exitLoop { + continue KeyList + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename after done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/helper.go b/vendor/github.com/go-ini/ini/helper.go new file mode 100644 index 000000000000..f9d80a682a55 --- /dev/null +++ b/vendor/github.com/go-ini/ini/helper.go @@ -0,0 +1,24 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 000000000000..99e7f86511a4 --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,176 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "os" + "regexp" + "runtime" + "strings" +) + +const ( + // Maximum allowed depth when recursively substituing variable names. + depthValues = 99 +) + +var ( + // DefaultSection is the name of default section. You can use this var or the string literal. + // In most of cases, an empty string is all you need to access the section. + DefaultSection = "DEFAULT" + + // LineBreak is the delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) + + // DefaultHeader explicitly writes default section header. + DefaultHeader = false + + // PrettySection indicates whether to put a line between sections. + PrettySection = true + // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. + PrettyEqual = false + // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatLeft = "" + // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatRight = "" +) + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +func init() { + if runtime.GOOS == "windows" && !inTest { + LineBreak = "\r\n" + } +} + +// LoadOptions contains all customized options used for load data source(s). +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // InsensitiveSections indicates whether the parser forces all section to lowercase. + InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. + SkipUnrecognizableLines bool + // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. + ShortCircuit bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // AllowNestedValues indicates whether to allow AWS-like nested values. + // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values + AllowNestedValues bool + // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. + // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure + // Relevant quote: Values can also span multiple lines, as long as they are indented deeper + // than the first line of the value. + AllowPythonMultilineValues bool + // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. + // Docs: https://docs.python.org/2/library/configparser.html + // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. + // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. + SpaceBeforeInlineComment bool + // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format + // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" + UnescapeValueDoubleQuotes bool + // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format + // when value is NOT surrounded by any quotes. + // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. + UnescapeValueCommentSymbols bool + // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string + // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". + KeyValueDelimiters string + // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". + KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string + // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). + PreserveSurroundedQuote bool + // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). + DebugFunc DebugFunc + // ReaderBufferSize is the buffer size of the reader in bytes. + ReaderBufferSize int + // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. + AllowNonUniqueSections bool + // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. + AllowDuplicateShadowValues bool +} + +// DebugFunc is the type of function called to log parse events. +type DebugFunc func(message string) + +// LoadSources allows caller to apply customized options for loading from data source(s). +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// ShadowLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 000000000000..a19d9f38ef14 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,837 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + nestedValues []string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values. + if k.value == val { + return nil + } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +func (k *Key) addNestedValue(val string) error { + if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add nested value to auto-increment or boolean key") + } + + k.nestedValues = append(k.nestedValues, val) + return nil +} + +// AddNestedValue adds a nested value to the key. +func (k *Key) AddNestedValue(val string) error { + if !k.s.f.options.AllowNestedValues { + return errors.New("nested value is not allowed") + } + return k.addNestedValue(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. Shadow +// keys with empty values are ignored from the returned list. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + if k.value == "" { + return []string{} + } + return []string{k.value} + } + + vals := make([]string, 0, len(k.shadows)+1) + if k.value != "" { + vals = append(vals, k.value) + } + for _, s := range k.shadows { + if s.value != "" { + vals = append(vals, s.value) + } + } + return vals +} + +// NestedValues returns nested values stored in the key. +// It is possible returned value is nil if no nested values stored in the key. +func (k *Key) NestedValues() []string { + return k.nestedValues +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < depthValues; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := vr[2 : len(vr)-2] + + // Search in the same section. + // If not found or found the key itself, then search again in default section. + nk, err := k.s.GetKey(noption) + if err != nil || k == nk { + nk, _ = k.s.f.Section("").GetKey(noption) + if nk == nil { + // Stop when no results found in the default section, + // and returns the value as-is. + break + } + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + v, err := strconv.ParseInt(k.String(), 0, 64) + return int(v), err +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 0, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 0, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 0, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx++ + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Bools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidBools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictBools returns list of bool divided by given delimiter or error on first invalid input. +func (k *Key) StrictBools(delim string) ([]bool, error) { + return k.parseBools(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseBools transforms strings to bools. +func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { + vals := make([]bool, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := parseBool(str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(bool)) + } + } + return vals, err +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseFloat(str, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(float64)) + } + } + return vals, err +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, int(val.(int64))) + } + } + return vals, err +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(int64)) + } + } + return vals, err +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, uint(val.(uint64))) + } + } + return vals, err +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(uint64)) + } + } + return vals, err +} + +type Parser func(str string) (interface{}, error) + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := time.Parse(format, str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(time.Time)) + } + } + return vals, err +} + +// doParse transforms strings to different types +func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { + vals := make([]interface{}, 0, len(strs)) + for _, str := range strs { + val, err := parser(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000000..44fc526c2cb6 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,520 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +const minReaderBufferSize = 4096 + +var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) + +type parserOptions struct { + IgnoreContinuation bool + IgnoreInlineComment bool + AllowPythonMultilineValues bool + SpaceBeforeInlineComment bool + UnescapeValueDoubleQuotes bool + UnescapeValueCommentSymbols bool + PreserveSurroundedQuote bool + DebugFunc DebugFunc + ReaderBufferSize int +} + +type parser struct { + buf *bufio.Reader + options parserOptions + + isEOF bool + count int + comment *bytes.Buffer +} + +func (p *parser) debug(format string, args ...interface{}) { + if p.options.DebugFunc != nil { + p.options.DebugFunc(fmt.Sprintf(format, args...)) + } +} + +func newParser(r io.Reader, opts parserOptions) *parser { + size := opts.ReaderBufferSize + if size < minReaderBufferSize { + size = minReaderBufferSize + } + + return &parser{ + buf: bufio.NewReaderSize(r, size), + options: opts, + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + _, err = p.buf.Read(mask) + if err != nil { + return err + } + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + _, err = p.buf.Read(mask) + if err != nil { + return err + } + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + var endIdx int + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], delimiters) + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, delimiters) + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + if endIdx == 0 { + return "", -1, ErrEmptyKeyName{line} + } + + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, bufferSize int) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + return "", nil + } + + var valQuote string + if len(line) > 3 && line[0:3] == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !p.options.IgnoreInlineComment { + var i int + if p.options.SpaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") + } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + + return line, nil +} + +func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { + parserBufferPeekResult, _ := p.buf.Peek(bufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil && peekErr != io.EOF { + p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) + return "", peekErr + } + + p.debug("readPythonMultilines: parsing %q", string(peekData)) + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) + for n, v := range peekMatches { + p.debug(" %d: %q", n, v) + } + + // Return if not a Python multiline value. + if len(peekMatches) != 3 { + p.debug("readPythonMultilines: end of value, got: %q", line) + return line, nil + } + + // Advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.buf.Discard(len(peekData)) + if err != nil { + p.debug("readPythonMultilines: failed to skip to the end, returning error") + return "", err + } + + line += "\n" + peekMatches[0] + } +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader, parserOptions{ + IgnoreContinuation: f.options.IgnoreContinuation, + IgnoreInlineComment: f.options.IgnoreInlineComment, + AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, + SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, + UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, + UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, + PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, + DebugFunc: f.options.DebugFunc, + ReaderBufferSize: f.options.ReaderBufferSize, + }) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + name := DefaultSection + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(DefaultSection) + } + section, _ := f.NewSection(name) + + // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key + var isLastValueEmpty bool + var lastRegularKey *Key + + var line []byte + var inUnparseableSection bool + + // NOTE: Iterate and increase `currentPeekSize` until + // the size of the parser buffer is found. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. + parserBufferSize := 0 + // NOTE: Peek 4kb at a time. + currentPeekSize := minReaderBufferSize + + if f.options.AllowPythonMultilineValues { + for { + peekBytes, _ := p.buf.Peek(currentPeekSize) + peekBytesLength := len(peekBytes) + + if parserBufferSize >= peekBytesLength { + break + } + + currentPeekSize *= 2 + parserBufferSize = peekBytesLength + } + } + + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + if f.options.AllowNestedValues && + isLastValueEmpty && len(line) > 0 { + if line[0] == ' ' || line[0] == '\t' { + err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + if err != nil { + return err + } + continue + } + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.LastIndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset auto-counter and comments + p.comment.Reset() + p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) + if err != nil { + switch { + // Treat as boolean key when desired, and whole line is key name. + case IsErrDelimiterNotFound(err): + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, parserBufferSize) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], parserBufferSize) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 000000000000..a3615d820b7a --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,256 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } + break + } + return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Deprecated: Use "HasKey" instead. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := make(map[string]string, len(s.keysHash)) + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + delete(s.keysHash, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + s.f.options.ChildSectionDelimiter + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]...) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000000..a486b2fe0fdc --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,747 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // SnackCase converts to format SNACK_CASE. + SnackCase NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= 'A' - 'a' + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflect.Bool: + vals, err = key.parseBools(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if err != nil && isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflect.Bool: + slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + vt := t + isPtr := t.Kind() == reflect.Ptr + if isPtr { + vt = t.Elem() + } + switch vt.Kind() { + case reflect.String: + stringVal := key.String() + if isPtr { + field.Set(reflect.ValueOf(&stringVal)) + } else if len(stringVal) > 0 { + field.SetString(key.String()) + } + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&boolVal)) + } else { + field.SetBool(boolVal) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // ParseDuration will not return err for `0`, so check the type name + if vt.Name() == "Duration" { + durationVal, err := key.Duration() + if err != nil { + if intVal, err := key.Int64(); err == nil { + field.SetInt(intVal) + return nil + } + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else if int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetInt(intVal) + field.Set(pv) + } else { + field.SetInt(intVal) + } + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetUint(uintVal) + field.Set(pv) + } else { + field.SetUint(uintVal) + } + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetFloat(floatVal) + field.Set(pv) + } else { + field.SetFloat(floatVal) + } + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&timeVal)) + } else { + field.Set(reflect.ValueOf(timeVal)) + } + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) + rawName = opts[0] + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") + } + return rawName, omitEmpty, allowShadow, allowNonUnique, extends +} + +// mapToField maps the given value to the matching field of the given section. +// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. +func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isStruct := tpField.Type.Kind() == reflect.Struct + isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { + if secs, err := s.f.SectionsByName(fieldName); err == nil { + if len(secs) <= sectionIndex { + return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) + } + // Only set the field to non-nil struct value if we have a section for it. + // Otherwise, we end up with a non-nil struct ptr even though there is no data. + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + continue + } + } + + // Map non-unique sections + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + newField, err := s.mapToSlice(fieldName, field, isStrict) + if err != nil { + return fmt.Errorf("map to slice %q: %v", fieldName, err) + } + + field.Set(newField) + continue + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("set field %q: %v", fieldName, err) + } + } + } + return nil +} + +// mapToSlice maps all sections with the same name and returns the new value. +// The type of the Value must be a slice. +func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { + secs, err := s.f.SectionsByName(secName) + if err != nil { + return reflect.Value{}, err + } + + typ := val.Type().Elem() + for i, sec := range secs { + elem := reflect.New(typ) + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { + return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) + } + + val = reflect.Append(val, elem.Elem()) + } + return val, nil +} + +// mapTo maps a section to object v. +func (s *Section) mapTo(v interface{}, isStrict bool) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + if typ.Kind() == reflect.Slice { + newField, err := s.mapToSlice(s.name, val, isStrict) + if err != nil { + return err + } + + val.Set(newField) + return nil + } + + return s.mapToField(val, isStrict, 0, s.name) +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + return s.mapTo(v, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + return s.mapTo(v, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + if allowShadow { + var keyWithShadows *Key + for i := 0; i < field.Len(); i++ { + var val string + switch sliceOf { + case reflect.String: + val = slice.Index(i).String() + case reflect.Int, reflect.Int64: + val = fmt.Sprint(slice.Index(i).Int()) + case reflect.Uint, reflect.Uint64: + val = fmt.Sprint(slice.Index(i).Uint()) + case reflect.Float64: + val = fmt.Sprint(slice.Index(i).Float()) + case reflect.Bool: + val = fmt.Sprint(slice.Index(i).Bool()) + case reflectTime: + val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + if i == 0 { + keyWithShadows = newKey(key.s, key.name, val) + } else { + _ = keyWithShadows.AddShadow(val) + } + } + *key = *keyWithShadows + return nil + } + + var buf bytes.Buffer + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflect.Bool: + buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-len(delim)]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim, allowShadow) + case reflect.Ptr: + if !field.IsNil() { + return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) + } + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + +// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. +type StructReflector interface { + ReflectINIStruct(*File) error +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + if !val.Field(i).CanInterface() { + continue + } + + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + if omitEmpty && isEmptyValue(field) { + continue + } + + if r, ok := field.Interface().(StructReflector); ok { + return r.ReflectINIStruct(s.f) + } + + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + for i := 0; i < field.Len(); i++ { + if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { + return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) + } + + sec, err := s.f.NewSection(fieldName) + if err != nil { + return err + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err := sec.reflectFrom(slice.Index(i)); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + } + continue + } + + // Note: Same reason as section. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + + delim := parseDelim(tpField.Tag.Get("delim")) + if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("reflect field %q: %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. It overwrites existing ones. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + + if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && + (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { + // Clear sections to make sure none exists before adding the new ones + s.f.DeleteSection(s.name) + + if typ.Kind() == reflect.Ptr { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + return sec.reflectFrom(val.Elem()) + } + + slice := val.Slice(0, val.Len()) + sliceOf := val.Type().Elem().Kind() + if sliceOf != reflect.Ptr { + return fmt.Errorf("not a slice of pointers") + } + + for i := 0; i < slice.Len(); i++ { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + + err = sec.reflectFrom(slice.Index(i)) + if err != nil { + return fmt.Errorf("reflect from %dth field: %v", i, err) + } + } + + return nil + } + + if typ.Kind() == reflect.Ptr { + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFromWithMapper reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go new file mode 100644 index 000000000000..affbbbb595c5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/norace.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package race + +func ReadSlice[T any](s []T) { +} + +func WriteSlice[T any](s []T) { +} diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go new file mode 100644 index 000000000000..f5e240dcde46 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/race.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package race + +import ( + "runtime" + "unsafe" +) + +func ReadSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} + +func WriteSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore new file mode 100644 index 000000000000..3a89c6e3e260 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/.gitignore @@ -0,0 +1,15 @@ +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE new file mode 100644 index 000000000000..1d2d645bd939 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md new file mode 100644 index 000000000000..b0bf59fbbd20 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -0,0 +1,1134 @@ +# MinLZ + +I have taken the experiences from this library and created a backwards compatible compression package called MinLZ. + +That package will seamlessly decode S2 content, making the transition from this package fairly trivial. + +There are many improvements to pretty much all aspects of S2 since we have "broken free" of the Snappy format specification. +You can read a writeup on [Design and Improvements over S2](https://gist.github.com/klauspost/a25b66198cdbdf7b5b224f670c894ed5). + +The only aspect not covered is custom dictionary encoding. While I do intend to fix errors in this package, +I do not expect to make significant improvements, since I consider MinLZ a better basis for going forward. + +See https://github.com/minio/minlz for all details. + +# S2 Compression + +S2 is an extension of [Snappy](https://github.com/google/snappy). + +S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads. + +Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy. +This means that S2 can seamlessly replace Snappy without converting compressed content. + +S2 can produce Snappy compatible output, faster and better than Snappy. +If you want full benefit of the changes you should use s2 without Snappy compatibility. + +S2 is designed to have high throughput on content that cannot be compressed. +This is important, so you don't have to worry about spending CPU cycles on already compressed data. + +## Benefits over Snappy + +* Better compression +* Adjustable compression (3 levels) +* Concurrent stream compression +* Faster decompression, even for Snappy compatible content +* Concurrent Snappy/S2 stream decompression +* Skip forward in compressed stream +* Random seeking with indexes +* Compatible with reading Snappy compressed content +* Smaller block size overhead on incompressible blocks +* Block concatenation +* Block Dictionary support +* Uncompressed stream mode +* Automatic stream size padding +* Snappy compatible block compression + +## Drawbacks over Snappy + +* Not optimized for 32 bit systems +* Streams use slightly more memory due to larger blocks and concurrency (configurable) + +# Usage + +Installation: `go get -u github.com/klauspost/compress/s2` + +Full package documentation: + +[![godoc][1]][2] + +[1]: https://godoc.org/github.com/klauspost/compress?status.svg +[2]: https://godoc.org/github.com/klauspost/compress/s2 + +## Compression + +```Go +func EncodeStream(src io.Reader, dst io.Writer) error { + enc := s2.NewWriter(dst) + _, err := io.Copy(enc, src) + if err != nil { + enc.Close() + return err + } + // Blocks until compression is done. + return enc.Close() +} +``` + +You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete. + +For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method. + +The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2. +It is possible to flush any buffered data using the `Flush()` method. +This will block until all data sent to the encoder has been written to the output. + +S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader. + +As a final method to compress data, if you have a single block of data you would like to have encoded as a stream, +a slightly more efficient method is to use the `EncodeBuffer` method. +This will take ownership of the buffer until the stream is closed. + +```Go +func EncodeStream(src []byte, dst io.Writer) error { + enc := s2.NewWriter(dst) + // The encoder owns the buffer until Flush or Close is called. + err := enc.EncodeBuffer(src) + if err != nil { + enc.Close() + return err + } + // Blocks until compression is done. + return enc.Close() +} +``` + +Each call to `EncodeBuffer` will result in discrete blocks being created without buffering, +so it should only be used a single time per stream. +If you need to write several blocks, you should use the regular io.Writer interface. + + +## Decompression + +```Go +func DecodeStream(src io.Reader, dst io.Writer) error { + dec := s2.NewReader(src) + _, err := io.Copy(dst, dec) + return err +} +``` + +Similar to the Writer, a Reader can be reused using the `Reset` method. + +For the best possible throughput, there is a `EncodeBuffer(buf []byte)` function available. +However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed. + +For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`. +Do however note that these functions (similar to Snappy) does not provide validation of data, +so data corruption may be undetected. Stream encoding provides CRC checks of data. + +It is possible to efficiently skip forward in a compressed stream using the `Skip()` method. +For big skips the decompressor is able to skip blocks without decompressing them. + +## Single Blocks + +Similar to Snappy S2 offers single block compression. +Blocks do not offer the same flexibility and safety as streams, +but may be preferable for very small payloads, less than 100K. + +Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result. +It is possible to provide a destination buffer. +If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used. +If not a new will be allocated. + +Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression. + +Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`. +Again an optional destination buffer can be supplied. +The `s2.DecodedLen(src)` can be used to get the minimum capacity needed. +If that is not satisfied a new buffer will be allocated. + +Block function always operate on a single goroutine since it should only be used for small payloads. + +# Commandline tools + +Some very simply commandline tools are provided; `s2c` for compression and `s2d` for decompression. + +Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases). + +Installing then requires Go to be installed. To install them, use: + +`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest` + +To build binaries to the current folder use: + +`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d` + + +## s2c + +``` +Usage: s2c [options] file1 file2 + +Compresses all files supplied as input separately. +Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'. +By default output files will be overwritten. +Use - as the only file name to read from stdin and write to stdout. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +File names beginning with 'http://' and 'https://' will be downloaded and compressed. +Only http response code 200 is accepted. + +Options: + -bench int + Run benchmark n times. No output will be written + -blocksize string + Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M") + -c Write all output to stdout. Multiple input files will be concatenated + -cpu int + Compress using this amount of threads (default 32) + -faster + Compress faster, but with a minor compression loss + -help + Display help + -index + Add seek index (default true) + -o string + Write output to another file. Single input file only + -pad string + Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1") + -q Don't write any output to terminal, except errors + -rm + Delete source file(s) after successful compression + -safe + Do not overwrite output files + -slower + Compress more, but a lot slower + -snappy + Generate Snappy compatible output stream + -verify + Verify written files + +``` + +## s2d + +``` +Usage: s2d [options] file1 file2 + +Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'. +Output file names have the extension removed. By default output files will be overwritten. +Use - as the only file name to read from stdin and write to stdout. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +File names beginning with 'http://' and 'https://' will be downloaded and decompressed. +Extensions on downloaded files are ignored. Only http response code 200 is accepted. + +Options: + -bench int + Run benchmark n times. No output will be written + -c Write all output to stdout. Multiple input files will be concatenated + -help + Display help + -o string + Write output to another file. Single input file only + -offset string + Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index + -q Don't write any output to terminal, except errors + -rm + Delete source file(s) after successful decompression + -safe + Do not overwrite output files + -tail string + Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index + -verify + Verify files, but do not write output +``` + +## s2sx: self-extracting archives + +s2sx allows creating self-extracting archives with no dependencies. + +By default, executables are created for the same platforms as the host os, +but this can be overridden with `-os` and `-arch` parameters. + +Extracted files have 0666 permissions, except when untar option used. + +``` +Usage: s2sx [options] file1 file2 + +Compresses all files supplied as input separately. +If files have '.s2' extension they are assumed to be compressed already. +Output files are written as 'filename.s2sx' and with '.exe' for windows targets. +If output is big, an additional file with ".more" is written. This must be included as well. +By default output files will be overwritten. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +Options: + -arch string + Destination architecture (default "amd64") + -c Write all output to stdout. Multiple input files will be concatenated + -cpu int + Compress using this amount of threads (default 32) + -help + Display help + -max string + Maximum executable size. Rest will be written to another file. (default "1G") + -os string + Destination operating system (default "windows") + -q Don't write any output to terminal, except errors + -rm + Delete source file(s) after successful compression + -safe + Do not overwrite output files + -untar + Untar on destination +``` + +Available platforms are: + + * darwin-amd64 + * darwin-arm64 + * linux-amd64 + * linux-arm + * linux-arm64 + * linux-mips64 + * linux-ppc64le + * windows-386 + * windows-amd64 + +By default, there is a size limit of 1GB for the output executable. + +When this is exceeded the remaining file content is written to a file called +output+`.more`. This file must be included for a successful extraction and +placed alongside the executable for a successful extraction. + +This file *must* have the same name as the executable, so if the executable is renamed, +so must the `.more` file. + +This functionality is disabled with stdin/stdout. + +### Self-extracting TAR files + +If you wrap a TAR file you can specify `-untar` to make it untar on the destination host. + +Files are extracted to the current folder with the path specified in the tar file. + +Note that tar files are not validated before they are wrapped. + +For security reasons files that move below the root folder are not allowed. + +# Performance + +This section will focus on comparisons to Snappy. +This package is solely aimed at replacing Snappy as a high speed compression package. +If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) +gives better compression, but typically at speeds slightly below "better" mode in this package. + +Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation. + +Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput. + +A "better" compression mode is also available. This allows to trade a bit of speed for a minor compression gain. +The content compressed in this mode is fully compatible with the standard decoder. + +Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU): + +| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | +|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| +| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% | +| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - | +| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% | +| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - | +| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% | +| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - | +| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% | +| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - | +| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% | +| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - | +| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% | +| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - | +| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% | +| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - | +| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% | +| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - | +| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% | +| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - | +| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% | +| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - | +| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% | +| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - | + +### Legend + +* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. +* `S2 Throughput`: Throughput of S2 in MB/s. +* `S2 % smaller`: How many percent of the Snappy output size is S2 better. +* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy. +* `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy. +* `"better" % smaller`: How many percent of the Snappy output size is S2 better when using "better" compression. + +There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads. + +Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size. + +The "better" compression mode sees a good improvement in all cases, but usually at a performance cost. + +Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup. +This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above). + +## Decompression + +S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used. + +S2 vs Snappy **decompression** speed. Both operating on single core: + +| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy | +|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------| +| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x | +| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x | +| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x | +| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x | +| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x | +| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x | +| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x | +| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x | +| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x | +| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x | +| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x | + +### Legend + +* `S2 Throughput`: Decompression speed of S2 encoded content. +* `Better Throughput`: Decompression speed of S2 "better" encoded content. +* `vs Snappy`: Decompression speed of S2 "better" mode compared to Snappy and absolute speed. + + +While the decompression code hasn't changed, there is a significant speedup in decompression speed. +S2 prefers longer matches and will typically only find matches that are 6 bytes or longer. +While this reduces compression a bit, it improves decompression speed. + +The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy. + +Without assembly decompression is also very fast; single goroutine decompression speed. No assembly: + +| File | S2 Throughput | S2 throughput | +|--------------------------------|---------------|---------------| +| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s | +| 10gb.tar.s2 | 1.30x | 867.07 MB/s | +| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s | +| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s | +| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s | +| enwik9.s2 | 1.67x | 681.53 MB/s | +| adresser.json.s2 | 3.41x | 4230.53 MB/s | +| silesia.tar.s2 | 1.52x | 811.58 | + +Even though S2 typically compresses better than Snappy, decompression speed is always better. + +### Concurrent Stream Decompression + +For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent) +that will decode a full stream using multiple goroutines. + +Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 `, best of 3: + +| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` | +|-------------------------------------------|------------|------------|------------|------------|-------------| +| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s | +| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s | +| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s | +| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s | +| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s | + +Scaling can be expected to be pretty linear until memory bandwidth is saturated. + +For now the DecodeConcurrent can only be used for full streams without seeking or combining with regular reads. + +## Block compression + + +When compressing blocks no concurrent compression is performed just as Snappy. +This is because blocks are for smaller payloads and generally will not benefit from concurrent compression. + +An important change is that incompressible blocks will not be more than at most 10 bytes bigger than the input. +In rare, worst case scenario Snappy blocks could be significantly bigger than the input. + +### Mixed content blocks + +The most reliable is a wide dataset. +For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), +53927 files, total input size: 4,014,735,833 bytes. Single goroutine used. + +| * | Input | Output | Reduction | MB/s | +|-------------------|------------|------------|------------|------------| +| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** | +| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 | +| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 | +| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 | +| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 | +| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 | + +S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best". +"Better" mode provides the same compression speed as LZ4 with better compression ratio. + +When outputting Snappy compatible output it still delivers better throughput (150MB/s more) and better compression. + +As can be seen from the other benchmarks decompression should also be easier on the S2 generated output. + +Though they cannot be compared due to different decompression speeds here are the speed/size comparisons for +other Go compressors: + +| * | Input | Output | Reduction | MB/s | +|-------------------|------------|------------|-----------|--------| +| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 | +| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 | +| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 | +| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 | + +### Standard block compression + +Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns. +So individual benchmarks should only be seen as a guideline and the overall picture is more important. + +These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above. + +Block compression. Parallel benchmark running on 16 cores, 16 goroutines. + +AMD64 assembly is use for both S2 and Snappy. + +| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec | +|-----------------------|-------------|---------|--------------|-------------|-------------|-------------| +| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s | +| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s | +| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s | +| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s | +| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s | +| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s | +| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s | +| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s | +| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s | +| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s | +| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s | +| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s | +| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s | +| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s | +| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s | +| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s | + + +Speed is generally at or above Snappy. Small blocks gets a significant speedup, although at the expense of size. + +Decompression speed is better than Snappy, except in one case. + +Since payloads are very small the variance in terms of size is rather big, so they should only be seen as a general guideline. + +Size is on average around Snappy, but varies on content type. +In cases where compression is worse, it usually is compensated by a speed boost. + + +### Better compression + +Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns. +So individual benchmarks should only be seen as a guideline and the overall picture is more important. + +| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec | +|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------| +| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s | +| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s | +| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s | +| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s | +| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s | +| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s | +| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s | +| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s | +| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s | +| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s | +| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s | +| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s | +| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s | +| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s | +| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s | +| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s | + + +Except for the mostly incompressible JPEG image compression is better and usually in the +double digits in terms of percentage reduction over Snappy. + +The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder +to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down. + +This mode aims to provide better compression at the expense of performance and achieves that +without a huge performance penalty, except on very small blocks. + +Decompression speed suffers a little compared to the regular S2 mode, +but still manages to be close to Snappy in spite of increased compression. + +# Best compression mode + +S2 offers a "best" compression mode. + +This will compress as much as possible with little regard to CPU usage. + +Mainly for offline compression, but where decompression speed should still +be high and compatible with other S2 compressed data. + +Some examples compared on 16 core CPU, amd64 assembly used: + +``` +* enwik10 +Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s +Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s +Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s + +* github-june-2days-2019.json +Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s +Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s +Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s + +* nyc-taxi-data-10M.csv +Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s +Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s +Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s + +* 10gb.tar +Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s +Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s +Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/ + +* consensus.db.10gb +Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s +Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s +Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s +``` + +Decompression speed should be around the same as using the 'better' compression mode. + +## Dictionaries + +*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for +neither encoding nor decoding. Performance improvements can be expected in the future.* + +Adding dictionaries allow providing a custom dictionary that will serve as lookup in the beginning of blocks. + +The same dictionary *must* be used for both encoding and decoding. +S2 does not keep track of whether the same dictionary is used, +and using the wrong dictionary will most often not result in an error when decompressing. + +Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary. +This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries +and treat the blocks similarly. + +Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression), +the same usage scenario applies to S2 dictionaries. + +> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file. + +S2 further limits the dictionary to only be enabled on the first 64KB of a block. +This will remove any negative (speed) impacts of the dictionaries on bigger blocks. + +### Compression + +Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst) +and a 64KB dictionary trained with zStandard the following sizes can be achieved. + +| | Default | Better | Best | +|--------------------|------------------|------------------|-----------------------| +| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) | +| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) | + +So for highly repetitive content, this case provides an almost 3x reduction in size. + +For less uniform data we will use the Go source code tree. +Compressing First 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input: + +| | Default | Better | Best | +|--------------------|-------------------|-------------------|-------------------| +| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39% | 19482828 (38.01%) | +| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) | +| Saving/file | 362 bytes | 428 bytes | 472 bytes | + + +### Creating Dictionaries + +There are no tools to create dictionaries in S2. +However, there are multiple ways to create a useful dictionary: + +#### Using a Sample File + +If your input is very uniform, you can just use a sample file as the dictionary. + +For example in the `github_users_sample_set` above, the average compression only goes up from +10.49% to 11.48% by using the first file as dictionary compared to using a dedicated dictionary. + +```Go + // Read a sample + sample, err := os.ReadFile("sample.json") + + // Create a dictionary. + dict := s2.MakeDict(sample, nil) + + // b := dict.Bytes() will provide a dictionary that can be saved + // and reloaded with s2.NewDict(b). + + // To encode: + encoded := dict.Encode(nil, file) + + // To decode: + decoded, err := dict.Decode(nil, file) +``` + +#### Using Zstandard + +Zstandard dictionaries can easily be converted to S2 dictionaries. + +This can be helpful to generate dictionaries for files that don't have a fixed structure. + + +Example, with training set files placed in `./training-set`: + +`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict` + +This will create a dictionary of 64KB, that can be converted to a dictionary like this: + +```Go + // Decode the Zstandard dictionary. + insp, err := zstd.InspectDictionary(zdict) + if err != nil { + panic(err) + } + + // We are only interested in the contents. + // Assume that files start with "// Copyright (c) 2023". + // Search for the longest match for that. + // This may save a few bytes. + dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023")) + + // b := dict.Bytes() will provide a dictionary that can be saved + // and reloaded with s2.NewDict(b). + + // We can now encode using this dictionary + encodedWithDict := dict.Encode(nil, payload) + + // To decode content: + decoded, err := dict.Decode(nil, encodedWithDict) +``` + +It is recommended to save the dictionary returned by ` b:= dict.Bytes()`, since that will contain only the S2 dictionary. + +This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized. + +Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files. +This can be omitted, at the expense of a few bytes. + +# Snappy Compatibility + +S2 now offers full compatibility with Snappy. + +This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output. + +There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by +simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`. +This uses "better" mode for all operations. +If you would like more control, you can use the s2 package as described below: + +## Blocks + +Snappy compatible blocks can be generated with the S2 encoder. +Compression and speed is typically a bit better `MaxEncodedLen` is also smaller for smaller memory usage. Replace + +| Snappy | S2 replacement | +|---------------------------|-----------------------| +| snappy.Encode(...) | s2.EncodeSnappy(...) | +| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) | + +`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output. + +`s2.ConcatBlocks` is compatible with snappy blocks. + +Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), +53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used: + +| Encoder | Size | MB/s | Reduction | +|-----------------------|------------|------------|------------| +| snappy.Encode | 1128706759 | 725.59 | 71.89% | +| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% | +| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% | +| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** | + +## Streams + +For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`. +All other options are available, but note that block size limit is different for snappy. + +Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput: + +| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best | +|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------| +| nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s | +| enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s | +| 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s | +| github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s | +| consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s | + +# Decompression + +All decompression functions map directly to equivalent s2 functions. + +| Snappy | S2 replacement | +|------------------------|--------------------| +| snappy.Decode(...) | s2.Decode(...) | +| snappy.DecodedLen(...) | s2.DecodedLen(...) | +| snappy.NewReader(...) | s2.NewReader(...) | + +Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip) +are also available for Snappy streams. + +If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize) +on your Reader will reduce memory consumption. + +# Concatenating blocks and streams. + +Concatenating streams will concatenate the output of both without recompressing them. +While this is inefficient in terms of compression it might be usable in certain scenarios. +The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement. + +Blocks can be concatenated using the `ConcatBlocks` function. + +Snappy blocks/streams can safely be concatenated with S2 blocks and streams. +Streams with indexes (see below) will currently not work on concatenated streams. + +# Stream Seek Index + +S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data. + +The index can either be appended to the stream as a skippable block or returned for separate storage. + +When the index is appended to a stream it will be skipped by regular decoders, +so the output remains compatible with other decoders. + +## Creating an Index + +To automatically add an index to a stream, add `WriterAddIndex()` option to your writer. +Then the index will be added to the stream when `Close()` is called. + +``` + // Add Index to stream... + enc := s2.NewWriter(w, s2.WriterAddIndex()) + io.Copy(enc, r) + enc.Close() +``` + +If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`. +This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`. + +``` + // Get index for separate storage... + enc := s2.NewWriter(w) + io.Copy(enc, r) + index, err := enc.CloseIndex() +``` + +The `index` can then be used needing to read from the stream. +This means the index can be used without needing to seek to the end of the stream +or for manually forwarding streams. See below. + +Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function. + +## Using Indexes + +To use indexes there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available. + +Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader. + +If 'random' is specified the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported. +Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface. + +``` + dec := s2.NewReader(r) + rs, err := dec.ReadSeeker(false, nil) + rs.Seek(wantOffset, io.SeekStart) +``` + +Get a seeker to seek forward. Since no index is provided, the index is read from the stream. +This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface. + +A custom index can be specified which will be used if supplied. +When using a custom index, it will not be read from the input stream. + +``` + dec := s2.NewReader(r) + rs, err := dec.ReadSeeker(false, index) + rs.Seek(wantOffset, io.SeekStart) +``` + +This will read the index from `index`. Since we specify non-random (forward only) seeking `r` does not have to be an io.Seeker + +``` + dec := s2.NewReader(r) + rs, err := dec.ReadSeeker(true, index) + rs.Seek(wantOffset, io.SeekStart) +``` + +Finally, since we specify that we want to do random seeking `r` must be an io.Seeker. + +The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader, +meaning changes performed to one is reflected in the other. + +To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used. + +## Manually Forwarding Streams + +Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type. +This can be used for parsing indexes, either separate or in streams. + +In some cases it may not be possible to serve a seekable stream. +This can for instance be an HTTP stream, where the Range request +is sent at the start of the stream. + +With a little bit of extra code it is still possible to use indexes +to forward to specific offset with a single forward skip. + +It is possible to load the index manually like this: +``` + var index s2.Index + _, err = index.Load(idxBytes) +``` + +This can be used to figure out how much to offset the compressed stream: + +``` + compressedOffset, uncompressedOffset, err := index.Find(wantOffset) +``` + +The `compressedOffset` is the number of bytes that should be skipped +from the beginning of the compressed file. + +The `uncompressedOffset` will then be offset of the uncompressed bytes returned +when decoding from that position. This will always be <= wantOffset. + +When creating a decoder it must be specified that it should *not* expect a stream identifier +at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset` +we create the decoder like this: + +``` + dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier()) +``` + +We are not completely done. We still need to forward the stream the uncompressed bytes we didn't want. +This is done using the regular "Skip" function: + +``` + err = dec.Skip(wantOffset - uncompressedOffset) +``` + +This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset. + +# Compact storage + +For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from +a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load). + +This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if headers contains errors. + +## Index Format: + +Each block is structured as a snappy skippable block, with the chunk ID 0x99. + +The block can be read from the front, but contains information so it can be read from the back as well. + +Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding), +with un-encoded value length of 64 bits, unless other limits are specified. + +| Content | Format | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| +| ID, `[1]byte` | Always 0x99. | +| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. | +| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". | +| UncompressedSize, Varint | Total Uncompressed size. | +| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. | +| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. | +| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. | +| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. | +| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. | +| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. | +| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. | +| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. | + +For regular streams the uncompressed offsets are fully predictable, +so `HasUncompressedOffsets` allows to specify that compressed blocks all have +exactly `EstBlockSize` bytes of uncompressed content. + +Entries *must* be in order, starting with the lowest offset, +and there *must* be no uncompressed offset duplicates. +Entries *may* point to the start of a skippable block, +but it is then not allowed to also have an entry for the next block since +that would give an uncompressed offset duplicate. + +There is no requirement for all blocks to be represented in the index. +In fact there is a maximum of 65536 block entries in an index. + +The writer can use any method to reduce the number of entries. +An implicit block start at 0,0 can be assumed. + +### Decoding entries: + +``` +// Read Uncompressed entries. +// Each assumes EstBlockSize delta from previous. +for each entry { + uOff = 0 + if HasUncompressedOffsets == 1 { + uOff = ReadVarInt // Read value from stream + } + + // Except for the first entry, use previous values. + if entryNum == 0 { + entry[entryNum].UncompressedOffset = uOff + continue + } + + // Uncompressed uses previous offset and adds EstBlockSize + entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff +} + + +// Guess that the first block will be 50% of uncompressed size. +// Integer truncating division must be used. +CompressGuess := EstBlockSize / 2 + +// Read Compressed entries. +// Each assumes CompressGuess delta from previous. +// CompressGuess is adjusted for each value. +for each entry { + cOff = ReadVarInt // Read value from stream + + // Except for the first entry, use previous values. + if entryNum == 0 { + entry[entryNum].CompressedOffset = cOff + continue + } + + // Compressed uses previous and our estimate. + entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff + + // Adjust compressed offset for next loop, integer truncating division must be used. + CompressGuess += cOff/2 +} +``` + +To decode from any given uncompressed offset `(wantOffset)`: + +* Iterate entries until `entry[n].UncompressedOffset > wantOffset`. +* Start decoding from `entry[n-1].CompressedOffset`. +* Discard `entry[n-1].UncompressedOffset - wantOffset` bytes from the decoded stream. + +See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface. + + +# Format Extensions + +* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`. +* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB). +* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset. + +Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0. + +The length is specified by reading the 3-bit length specified in the tag and decode using this table: + +| Length | Actual Length | +|--------|----------------------| +| 0 | 4 | +| 1 | 5 | +| 2 | 6 | +| 3 | 7 | +| 4 | 8 | +| 5 | 8 + read 1 byte | +| 6 | 260 + read 2 bytes | +| 7 | 65540 + read 3 bytes | + +This allows any repeat offset + length to be represented by 2 to 5 bytes. +It also allows to emit matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies. + +Lengths are stored as little endian values. + +The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams. + +Default streaming block size is 1MB. + +# Dictionary Encoding + +Adding dictionaries allow providing a custom dictionary that will serve as lookup in the beginning of blocks. + +A dictionary provides an initial repeat value that can be used to point to a common header. + +Other than that the dictionary contains values that can be used as back-references. + +Often used data should be placed at the *end* of the dictionary since offsets < 2048 bytes will be smaller. + +## Format + +Dictionary *content* must at least 16 bytes and less or equal to 64KiB (65536 bytes). + +Encoding: `[repeat value (uvarint)][dictionary content...]` + +Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset. +This value is an offset into the dictionary content and not a back-reference offset, +so setting this to 0 will make the repeat value point to the first value of the dictionary. + +The value must be less than the dictionary length-8 + +## Encoding + +From the decoder point of view the dictionary content is seen as preceding the encoded content. + +`[dictionary content][decoded output]` + +Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block. + +Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data. +However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed. + +The first match can be a repeat value, which will use the repeat offset stored in the dictionary. + +When 64KB (65536 bytes) has been en/decoded it is no longer allowed to reference the dictionary, +neither by a copy nor repeat operations. +If the boundary is crossed while copying from the dictionary, the operation should complete, +but the next instruction is not allowed to reference the dictionary. + +Valid blocks encoded *without* a dictionary can be decoded with any dictionary. +There are no checks whether the supplied dictionary is the correct for a block. +Because of this there is no overhead by using a dictionary. + +## Example + +This is the dictionary content. Elements are separated by `[]`. + +Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`. + +Initial repeat offset is set at 10, which is the letter `2`. + +Encoded `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]` + +Decoded: `[10][ bananas w][hich][ were ][brown ][were added]` + +Output: `10 bananas which were brown were added` + + +## Streams + +For streams each block can use the dictionary. + +The dictionary cannot not currently be provided on the stream. + + +# LICENSE + +This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation. + +Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go new file mode 100644 index 000000000000..264ffd0a9b4a --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode.go @@ -0,0 +1,443 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "errors" + "fmt" + "strconv" + + "github.com/klauspost/compress/internal/race" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("s2: corrupt input") + // ErrCRC reports that the input failed CRC validation (streams only) + ErrCRC = errors.New("s2: corrupt input, crc mismatch") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("s2: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("s2: unsupported input") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + + race.WriteSlice(dst) + race.ReadSlice(src[s:]) + + if s2Decode(dst, src[s:]) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2DecodeDict(dst, src []byte, dict *Dict) int { + if dict == nil { + return s2Decode(dst, src) + } + const debug = false + const debugErrs = debug + + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := len(dict.dict) - dict.repeat + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 + s += 3 + case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. + x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 + s += 4 + case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + s += 5 + } + length = int(x) + 1 + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 + s += 3 + + case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 + s += 5 + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + startOff := len(dict.dict) - offset + d + if startOff < 0 || startOff+length > len(dict.dict) { + if debugErrs { + fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict)) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff) + } + copy(dst[d:d+length], dict.dict[startOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + rOff := len(dict.dict) - (offset - d) + if debug { + fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff) + } + if rOff+length > len(dict.dict) { + if debugErrs { + fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + if rOff < 0 { + if debugErrs { + fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + copy(dst[d:d+length], dict.dict[rOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + if debugErrs { + fmt.Println("wanted length", len(dst), "got", d) + } + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s new file mode 100644 index 000000000000..9b105e03c59c --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s @@ -0,0 +1,568 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 AX +#define R_TMP1 BX +#define R_LEN CX +#define R_OFF DX +#define R_SRC SI +#define R_DST DI +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x (shared) +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $48-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DST + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SRC + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + XORQ R_OFF, R_OFF + +loop: + // for s < len(src) + CMPQ R_SRC, R_SEND + JEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (R_SRC), R_LEN + MOVL R_LEN, R_TMP1 + ANDL $3, R_TMP1 + CMPL R_TMP1, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, R_LEN + CMPL R_LEN, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + INCQ R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVQ R_DEND, R_TMP0 + SUBQ R_DST, R_TMP0 + MOVQ R_SEND, R_TMP1 + SUBQ R_SRC, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ R_LEN, $16 + JGT callMemmove + CMPQ R_TMP0, $16 + JLT callMemmove + CMPQ R_TMP1, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R_SRC), X0 + MOVOU X0, 0(R_DST) + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ R_LEN, R_TMP0 + JGT errCorrupt + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ R_DST, 0(SP) + MOVQ R_SRC, 8(SP) + MOVQ R_LEN, 16(SP) + MOVQ R_DST, 24(SP) + MOVQ R_SRC, 32(SP) + MOVQ R_LEN, 40(SP) + MOVQ R_OFF, 48(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVQ 24(SP), R_DST + MOVQ 32(SP), R_SRC + MOVQ 40(SP), R_LEN + MOVQ 48(SP), R_OFF + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ R_LEN, R_SRC + SUBQ $58, R_SRC + CMPQ R_SRC, R_SEND + JA errCorrupt + + // case x == 60: + CMPL R_LEN, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(R_SRC), R_LEN + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(R_SRC), R_LEN + JMP doLit + +tagLit62Plus: + CMPL R_LEN, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + // We read one byte, safe to read one back, since we are just reading tag. + // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(R_SRC), R_LEN + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(R_SRC), R_OFF + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(R_SRC), R_OFF + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMPQ R_TMP1, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // length = 4 + int(src[s-2])>>2&0x7 + MOVBQZX -1(R_SRC), R_TMP1 + MOVQ R_LEN, R_TMP0 + SHRQ $2, R_LEN + ANDQ $0xe0, R_TMP0 + ANDQ $7, R_LEN + SHLQ $3, R_TMP0 + ADDQ $4, R_LEN + ORQ R_TMP1, R_TMP0 + + // check if repeat code, ZF set by ORQ. + JZ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (length) + MOVQ R_TMP0, R_OFF + JMP doCopy + +// This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMPQ R_LEN, $9 + JL doCopyRepeat + + // Read additional bytes for length. + JE repeatLen1 + + // Rare, so the extra branch shouldn't hurt too much. + CMPQ R_LEN, $10 + JE repeatLen2 + JMP repeatLen3 + +// Read repeat lengths. +repeatLen1: + // s ++ + ADDQ $1, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = src[s-1] + 8 + MOVBQZX -1(R_SRC), R_LEN + ADDL $8, R_LEN + JMP doCopyRepeat + +repeatLen2: + // s +=2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8) + MOVWQZX -2(R_SRC), R_LEN + ADDL $260, R_LEN + JMP doCopyRepeat + +repeatLen3: + // s +=3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16) + // Read one byte further back (just part of the tag, shifted out) + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + ADDL $65540, R_LEN + JMP doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset + + // if d < offset { etc } + MOVQ R_DST, R_TMP1 + SUBQ R_DBASE, R_TMP1 + CMPQ R_TMP1, R_OFF + JLT errCorrupt + + // Repeat values can skip the test above, since any offset > 0 will be in dst. +doCopyRepeat: + // if offset <= 0 { etc } + CMPQ R_OFF, $0 + JLE errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R_DEND, R_TMP1 + SUBQ R_DST, R_TMP1 + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVQ R_DEND, R_TMP2 + SUBQ R_DST, R_TMP2 + MOVQ R_DST, R_TMP3 + SUBQ R_OFF, R_TMP3 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ R_LEN, $16 + JGT slowForwardCopy + CMPQ R_OFF, $8 + JLT slowForwardCopy + CMPQ R_TMP2, $16 + JLT slowForwardCopy + MOVQ 0(R_TMP3), R_TMP0 + MOVQ R_TMP0, 0(R_DST) + MOVQ 8(R_TMP3), R_TMP1 + MOVQ R_TMP1, 8(R_DST) + ADDQ R_LEN, R_DST + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R_TMP2 + CMPQ R_LEN, R_TMP2 + JGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVQ R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMPQ R_TMP2, $8 + JGE fixUpSlowForwardCopy + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_DST) + SUBQ R_TMP2, R_LEN + ADDQ R_TMP2, R_DST + ADDQ R_TMP2, R_TMP2 + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ R_DST, R_TMP0 + ADDQ R_LEN, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ R_LEN, $0 + JLE loop + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_TMP0) + ADDQ $8, R_TMP3 + ADDQ $8, R_TMP0 + SUBQ $8, R_LEN + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + INCQ R_TMP3 + INCQ R_DST + DECQ R_LEN + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ R_DST, R_DEND + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s new file mode 100644 index 000000000000..78e463f342b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -0,0 +1,574 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 R2 +#define R_TMP1 R3 +#define R_LEN R4 +#define R_OFF R5 +#define R_SRC R6 +#define R_DST R7 +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// TEST_SRC will check if R_SRC is <= SRC_END +#define TEST_SRC() \ + CMP R_SEND, R_SRC \ + BGT errCorrupt + +// MOVD R_SRC, R_TMP1 +// SUB R_SBASE, R_TMP1, R_TMP1 +// CMP R_SLEN, R_TMP1 +// BGT errCorrupt + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $56-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DST + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SRC + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + MOVD $0, R_OFF + +loop: + // for s < len(src) + CMP R_SEND, R_SRC + BEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R_SRC), R_LEN + MOVW R_LEN, R_TMP1 + ANDW $3, R_TMP1 + MOVW $1, R1 + CMPW R1, R_TMP1 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R_LEN, R_LEN + CMPW R_LEN, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R_SRC, R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + ADD $1, R_LEN, R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVD R_DEND, R_TMP0 + SUB R_DST, R_TMP0, R_TMP0 + MOVD R_SEND, R_TMP1 + SUB R_SRC, R_TMP1, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R_LEN + BGT callMemmove + CMP $16, R_TMP0 + BLT callMemmove + CMP $16, R_TMP1 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R_SRC), (R_TMP2, R_TMP3) + STP (R_TMP2, R_TMP3), 0(R_DST) + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R_TMP0, R_LEN + BGT errCorrupt + CMP R_TMP1, R_LEN + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R_DST, 8(RSP) + MOVD R_SRC, 16(RSP) + MOVD R_LEN, 24(RSP) + MOVD R_DST, 32(RSP) + MOVD R_SRC, 40(RSP) + MOVD R_LEN, 48(RSP) + MOVD R_OFF, 56(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVD 32(RSP), R_DST + MOVD 40(RSP), R_SRC + MOVD 48(RSP), R_LEN + MOVD 56(RSP), R_OFF + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADD R_LEN, R_SRC, R_SRC + SUB $58, R_SRC, R_SRC + TEST_SRC() + + // case x == 60: + MOVW $61, R1 + CMPW R1, R_LEN + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R_SRC), R_LEN + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R_SRC), R_LEN + B doLit + +tagLit62Plus: + CMPW $62, R_LEN + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R_SRC), R_LEN + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP1<<16, R_LEN + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R_SRC), R_LEN + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + MOVD R_SRC, R_TMP1 + SUB R_SBASE, R_TMP1, R_TMP1 + CMP R_SLEN, R_TMP1 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R_SRC), R_OFF + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R_SRC), R_OFF + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMP $2, R_TMP1 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // Calculate offset in R_TMP0 in case it is a repeat. + MOVD R_LEN, R_TMP0 + AND $0xe0, R_TMP0 + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP0<<3, R_TMP1, R_TMP0 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R_LEN>>2, R1, R_LEN + ADD $4, R_LEN, R_LEN + + // check if repeat code with offset 0. + CMP $0, R_TMP0 + BEQ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (offset) + MOVD R_TMP0, R_OFF + B doCopy + + // This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMP $9, R_LEN + BLT doCopyRepeat + BEQ repeatLen1 + CMP $10, R_LEN + BEQ repeatLen2 + +repeatLen3: + // s +=3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540 + MOVBU -1(R_SRC), R_TMP0 + MOVHU -3(R_SRC), R_LEN + ORR R_TMP0<<16, R_LEN, R_LEN + ADD $65540, R_LEN, R_LEN + B doCopyRepeat + +repeatLen2: + // s +=2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260 + MOVHU -2(R_SRC), R_LEN + ADD $260, R_LEN, R_LEN + B doCopyRepeat + +repeatLen1: + // s +=1 + ADD $1, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = src[s-1] + 8 + MOVBU -1(R_SRC), R_LEN + ADD $8, R_LEN, R_LEN + B doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset + + // if d < offset { etc } + MOVD R_DST, R_TMP1 + SUB R_DBASE, R_TMP1, R_TMP1 + CMP R_OFF, R_TMP1 + BLT errCorrupt + + // Repeat values can skip the test above, since any offset > 0 will be in dst. +doCopyRepeat: + + // if offset <= 0 { etc } + CMP $0, R_OFF + BLE errCorrupt + + // if length > len(dst)-d { etc } + MOVD R_DEND, R_TMP1 + SUB R_DST, R_TMP1, R_TMP1 + CMP R_TMP1, R_LEN + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVD R_DEND, R_TMP2 + SUB R_DST, R_TMP2, R_TMP2 + MOVD R_DST, R_TMP3 + SUB R_OFF, R_TMP3, R_TMP3 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMP $16, R_LEN + BGT slowForwardCopy + CMP $8, R_OFF + BLT slowForwardCopy + CMP $16, R_TMP2 + BLT slowForwardCopy + MOVD 0(R_TMP3), R_TMP0 + MOVD R_TMP0, 0(R_DST) + MOVD 8(R_TMP3), R_TMP1 + MOVD R_TMP1, 8(R_DST) + ADD R_LEN, R_DST, R_DST + B loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R_TMP2, R_TMP2 + CMP R_TMP2, R_LEN + BGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVD R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMP $8, R_TMP2 + BGE fixUpSlowForwardCopy + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_DST) + SUB R_TMP2, R_LEN, R_LEN + ADD R_TMP2, R_DST, R_DST + ADD R_TMP2, R_TMP2, R_TMP2 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R_DST, R_TMP0 + ADD R_LEN, R_DST, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R_LEN + BLE loop + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_TMP0) + ADD $8, R_TMP3, R_TMP3 + ADD $8, R_TMP0, R_TMP0 + SUB $8, R_LEN, R_LEN + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + ADD $1, R_TMP3, R_TMP3 + ADD $1, R_DST, R_DST + SUB $1, R_LEN, R_LEN + CBNZ R_LEN, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R_DEND, R_DST + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R_TMP0 + MOVD R_TMP0, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go new file mode 100644 index 000000000000..cb3576edd470 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || arm64) && !appengine && gc && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !noasm + +package s2 + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func s2Decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go new file mode 100644 index 000000000000..c99d40b69d0d --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -0,0 +1,288 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm64 appengine !gc noasm + +package s2 + +import ( + "fmt" + "strconv" + + "github.com/klauspost/compress/internal/le" +) + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2Decode(dst, src []byte) int { + const debug = false + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := 0 + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + x = uint32(src[s+1]) + s += 2 + case x == 61: + x = uint32(le.Load16(src, s+1)) + s += 3 + case x == 62: + // Load as 32 bit and shift down. + x = le.Load32(src, s) + x >>= 8 + s += 4 + case x == 63: + x = le.Load32(src, s+1) + s += 5 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + length = int(le.Load16(src, s)) + 1<<8 + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + offset = int(le.Load16(src, s+1)) + length = 1 + int(src[s])>>2 + s += 3 + + case tagCopy4: + offset = int(le.Load32(src, s+1)) + length = 1 + int(src[s])>>2 + s += 5 + } + + if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go new file mode 100644 index 000000000000..f125ad096373 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/dict.go @@ -0,0 +1,350 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "sync" +) + +const ( + // MinDictSize is the minimum dictionary size when repeat has been read. + MinDictSize = 16 + + // MaxDictSize is the maximum dictionary size when repeat has been read. + MaxDictSize = 65536 + + // MaxDictSrcOffset is the maximum offset where a dictionary entry can start. + MaxDictSrcOffset = 65535 +) + +// Dict contains a dictionary that can be used for encoding and decoding s2 +type Dict struct { + dict []byte + repeat int // Repeat as index of dict + + fast, better, best sync.Once + fastTable *[1 << 14]uint16 + + betterTableShort *[1 << 14]uint16 + betterTableLong *[1 << 17]uint16 + + bestTableShort *[1 << 16]uint32 + bestTableLong *[1 << 19]uint32 +} + +// NewDict will read a dictionary. +// It will return nil if the dictionary is invalid. +func NewDict(dict []byte) *Dict { + if len(dict) == 0 { + return nil + } + var d Dict + // Repeat is the first value of the dict + r, n := binary.Uvarint(dict) + if n <= 0 { + return nil + } + dict = dict[n:] + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize || len(dict) > MaxDictSize { + return nil + } + d.repeat = int(r) + if d.repeat > len(dict) { + return nil + } + return &d +} + +// Bytes will return a serialized version of the dictionary. +// The output can be sent to NewDict. +func (d *Dict) Bytes() []byte { + dst := make([]byte, binary.MaxVarintLen16+len(d.dict)) + return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...) +} + +// MakeDict will create a dictionary. +// 'data' must be at least MinDictSize. +// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used. +// If searchStart is set the start repeat value will be set to the last +// match of this content. +// If no matches are found, it will attempt to find shorter matches. +// This content should match the typical start of a block. +// If at least 4 bytes cannot be matched, repeat is set to start of block. +func MakeDict(data []byte, searchStart []byte) *Dict { + if len(data) == 0 { + return nil + } + if len(data) > MaxDictSize { + data = data[len(data)-MaxDictSize:] + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize { + return nil + } + + // Find the longest match possible, last entry if multiple. + for s := len(searchStart); s > 4; s-- { + if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 { + d.repeat = idx + break + } + } + + return &d +} + +// MakeDictManual will create a dictionary. +// 'data' must be at least MinDictSize and less than or equal to MaxDictSize. +// A manual first repeat index into data must be provided. +// It must be less than len(data)-8. +func MakeDictManual(data []byte, firstIdx uint16) *Dict { + if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize { + return nil + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + + d.repeat = int(firstIdx) + return &d +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockDictGo(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBetterDict(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBest(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func (d *Dict) Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + if s2DecodeDict(dst, src[s:], d) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +func (d *Dict) initFast() { + d.fast.Do(func() { + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint16 + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8-2; i += 3 { + x0 := load64(d.dict, i) + h0 := hash6(x0, tableBits) + h1 := hash6(x0>>8, tableBits) + h2 := hash6(x0>>16, tableBits) + table[h0] = uint16(i) + table[h1] = uint16(i + 1) + table[h2] = uint16(i + 2) + } + d.fastTable = &table + }) +} + +func (d *Dict) initBetter() { + d.better.Do(func() { + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + lTable[hash7(cv, lTableBits)] = uint16(i) + sTable[hash4(cv, sTableBits)] = uint16(i) + } + d.betterTableShort = &sTable + d.betterTableLong = &lTable + }) +} + +func (d *Dict) initBest() { + d.best.Do(func() { + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + lTable[hashL] = uint32(i) | candidateL<<16 + sTable[hashS] = uint32(i) | candidateS<<16 + } + d.bestTableShort = &sTable + d.bestTableLong = &lTable + }) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go new file mode 100644 index 000000000000..330e755716fa --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode.go @@ -0,0 +1,418 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/internal/race" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlock(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +var estblockPool [2]sync.Pool + +// EstimateBlockSize will perform a very fast compression +// without outputting the result and return the compressed output size. +// The function returns -1 if no improvement could be achieved. +// Using actual compression will most often produce better compression than the estimate. +func EstimateBlockSize(src []byte) (d int) { + if len(src) <= inputMargin || int64(len(src)) > 0xffffffff { + return -1 + } + if len(src) <= 1024 { + const sz, pool = 2048, 0 + tmp, ok := estblockPool[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer estblockPool[pool].Put(tmp) + + d = calcBlockSizeSmall(src, tmp) + } else { + const sz, pool = 32768, 1 + tmp, ok := estblockPool[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer estblockPool[pool].Put(tmp) + + d = calcBlockSize(src, tmp) + } + + if d == 0 { + return -1 + } + // Size of the varint encoded block size. + d += (bits.Len64(uint64(len(src))) + 7) / 7 + + if d >= len(src) { + return -1 + } + return d +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBetter(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBest(dst[d:], src, nil) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappy returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappy(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBetterSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBestSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination. +// If the destination is nil or too small, a new will be allocated. +// The blocks are not validated, so garbage in = garbage out. +// dst may not overlap block data. +// Any data in dst is preserved as is, so it will not be considered a block. +func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) { + totalSize := uint64(0) + compSize := 0 + for _, b := range blocks { + l, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + totalSize += uint64(l) + compSize += len(b) - hdr + } + if totalSize == 0 { + dst = append(dst, 0) + return dst, nil + } + if totalSize > math.MaxUint32 { + return nil, ErrTooLarge + } + var tmp [binary.MaxVarintLen32]byte + hdrSize := binary.PutUvarint(tmp[:], totalSize) + wantSize := hdrSize + compSize + + if cap(dst)-len(dst) < wantSize { + dst = append(make([]byte, 0, wantSize+len(dst)), dst...) + } + dst = append(dst, tmp[:hdrSize]...) + for _, b := range blocks { + _, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + dst = append(dst, b[hdr:]...) + } + return dst, nil +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 8 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// will be accepted by the encoder. +const minNonLiteralBlockSize = 32 + +const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits) + +// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size. +// Blocks this big are highly discouraged, though. +// Half the size on 32 bit systems. +const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5 + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +// 32 bit platforms will have lower thresholds for rejecting big content. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + // Also includes negative. + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + // Size of the varint encoded block size. + n = n + uint64((bits.Len64(n)+7)/7) + + // Add maximum size of encoding block as literals. + n += uint64(literalExtraSize(int64(srcLen))) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + return int(n) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go new file mode 100644 index 000000000000..9d12c44f38a6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -0,0 +1,1477 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/bits" + + "github.com/klauspost/compress/internal/le" +) + +func load32(b []byte, i int) uint32 { + return le.Load32(b, i) +} + +func load64(b []byte, i int) uint64 { + return le.Load64(b, i) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + const prime6bytes = 227718039650203 + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +func encodeGo(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + var n int + if len(src) < 64<<10 { + n = encodeBlockGo64K(dst[d:], src) + } else { + n = encodeBlockGo(dst[d:], src) + } + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockGo64K is a specialized version for compressing blocks <= 64KB +func encodeBlockGo64K(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>5 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint16(s) + table[hash1] = uint16(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint16(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint16(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint16(s - 2) + table[currHash] = uint16(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +func encodeBlockSnappyGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockSnappyGo64K is a special version of encodeBlockSnappyGo for sizes <64KB +func encodeBlockSnappyGo64K(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>5 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint16(s) + table[hash1] = uint16(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint16(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint16(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint16(s - 2) + table[currHash] = uint16(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + dict.initFast() + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := min(len(src)-inputMargin, MaxDictSrcOffset-maxAhead) + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form can start with a dict entry (copy or repeat). + s := 0 + + // Convert dict repeat to offset + repeat := len(dict.dict) - dict.repeat + cv := load64(src, 0) + + // While in dict +searchDict: + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + if nextS > sLimit { + if debug { + fmt.Println("slimit reached", s, nextS) + } + break searchDict + } + candidateDict := int(dict.fastTable[hash0]) + candidateDict2 := int(dict.fastTable[hash1]) + candidate2 := int(table[hash1]) + candidate := int(table[hash0]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + + if repeat > s { + candidate := len(dict.dict) - repeat + s + if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + cv = load64(src, s) + continue + } + } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + if debug { + fmt.Println("emitted reg repeat", s-base, "s:", s) + } + cv = load64(src, s) + continue searchDict + } + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue searchDict + } + // Start with table. These matches will always be closer. + if uint32(cv) == load32(src, candidate) { + goto emitMatch + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + goto emitMatch + } + + // Check dict. Dicts have longer offsets, so we want longer matches. + if cv == load64(dict.dict, candidateDict) { + table[hash2] = uint32(s + 2) + goto emitDict + } + + candidateDict = int(dict.fastTable[hash2]) + // Check if upper 7 bytes match + if candidateDict2 >= 1 { + if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) { + table[hash2] = uint32(s + 2) + candidateDict = candidateDict2 + s++ + goto emitDict + } + } + + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + goto emitMatch + } + if candidateDict >= 2 { + // Check if upper 6 bytes match + if cv^load64(dict.dict, candidateDict-2) < (1 << 16) { + s += 2 + goto emitDict + } + } + + cv = load64(src, nextS) + s = nextS + continue searchDict + + emitDict: + { + if debug { + if load32(dict.dict, candidateDict) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] { + candidateDict-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = s + (len(dict.dict)) - candidateDict + + // Extend the 4-byte match as long as possible. + s += 4 + candidateDict += 4 + for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateDict += 8 + } + + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], repeat, s-base) + } else { + // Split to ensure we don't start a copy within next block + d += emitCopy(dst[d:], repeat, 4) + d += emitRepeat(dst[d:], repeat, s-base-4) + } + if false { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index and continue loop to try new candidate. + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>8, tableBits) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s - 1) + cv = load64(src, s) + } + continue + } + emitMatch: + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + if debug { + fmt.Println("non-dict matching at", s, "repeat:", repeat) + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + if debug { + fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go new file mode 100644 index 000000000000..7aadd255fe3b --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -0,0 +1,317 @@ +//go:build !appengine && !noasm && gc +// +build !appengine,!noasm,gc + +package s2 + +import ( + "sync" + + "github.com/klauspost/compress/internal/race" +) + +const hasAmd64Asm = true + +var encPools [4]sync.Pool + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) >= 4<<20 { + const sz, pool = 65536, 0 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm(dst, src, tmp) + } + if len(src) >= limit12B { + const sz, pool = 65536, 0 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm4MB(dst, src, tmp) + } + if len(src) >= limit10B { + const sz, pool = 16384, 1 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm12B(dst, src, tmp) + } + if len(src) >= limit8B { + const sz, pool = 4096, 2 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm10B(dst, src, tmp) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + const sz, pool = 1024, 3 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm8B(dst, src, tmp) +} + +var encBetterPools [5]sync.Pool + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetter(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) > 4<<20 { + const sz, pool = 589824, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeBetterBlockAsm(dst, src, tmp) + } + if len(src) >= limit12B { + const sz, pool = 589824, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeBetterBlockAsm4MB(dst, src, tmp) + } + if len(src) >= limit10B { + const sz, pool = 81920, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeBetterBlockAsm12B(dst, src, tmp) + } + if len(src) >= limit8B { + const sz, pool = 20480, 1 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeBetterBlockAsm10B(dst, src, tmp) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + const sz, pool = 5120, 2 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeBetterBlockAsm8B(dst, src, tmp) +} + +// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + if len(src) > 65536 { + const sz, pool = 65536, 0 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeSnappyBlockAsm(dst, src, tmp) + } + if len(src) >= limit12B { + const sz, pool = 65536, 0 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeSnappyBlockAsm64K(dst, src, tmp) + } + if len(src) >= limit10B { + const sz, pool = 16384, 1 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeSnappyBlockAsm12B(dst, src, tmp) + } + if len(src) >= limit8B { + const sz, pool = 4096, 2 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeSnappyBlockAsm10B(dst, src, tmp) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + const sz, pool = 1024, 3 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeSnappyBlockAsm8B(dst, src, tmp) +} + +// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + if len(src) > 65536 { + const sz, pool = 589824, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeSnappyBetterBlockAsm(dst, src, tmp) + } + + if len(src) >= limit12B { + const sz, pool = 294912, 4 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeSnappyBetterBlockAsm64K(dst, src, tmp) + } + if len(src) >= limit10B { + const sz, pool = 81920, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeSnappyBetterBlockAsm12B(dst, src, tmp) + } + if len(src) >= limit8B { + const sz, pool = 20480, 1 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeSnappyBetterBlockAsm10B(dst, src, tmp) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + const sz, pool = 5120, 2 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeSnappyBetterBlockAsm8B(dst, src, tmp) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go new file mode 100644 index 000000000000..c857c5c2839f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -0,0 +1,793 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "fmt" + "math" + "math/bits" +) + +// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBest(dst, src []byte, dict *Dict) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + + debug = false + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + sLimitDict := min(len(src)-inputMargin, MaxDictSrcOffset-inputMargin) + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + repeat := 1 + if dict != nil { + dict.initBest() + s = 0 + repeat = len(dict.dict) - dict.repeat + } + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + rep, dict bool + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + if dict != nil && s >= MaxDictSrcOffset { + dict = nil + if repeat > s { + repeat = math.MinInt32 + } + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + if m.rep { + return score - emitRepeatSize(offset, m.length) + } + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32, rep bool) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset, rep: rep} + s += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + matchDict := func(candidate, s int, first uint32, rep bool) match { + if s >= MaxDictSrcOffset { + return match{offset: candidate, s: s} + } + // Calculate offset as if in continuous array with s + offset := -len(dict.dict) + candidate + if best.length != 0 && best.s-best.offset == s-offset && !rep { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + + if load32(dict.dict, candidate) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true} + s += 4 + if !rep { + for s < sLimitDict && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } else { + for s < len(src) && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } + m.length -= candidate + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + if s > 0 { + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) + } + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false)) + } + { + if (dict == nil || repeat <= s) && repeat > 0 { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + } else if s-repeat < -4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + candidate++ + best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true)) + } + + if best.length > 0 { + hashS := hash4(cv>>8, sTableBits) + // s+1 + nextShort := sTable[hashS] + s := s + 1 + cv := load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong := lTable[hashL] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at + 1 + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + + // s+2 + if true { + hashS := hash4(cv>>8, sTableBits) + + nextShort = sTable[hashS] + s++ + cv = load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong = lTable[hashL] + + if (dict == nil || repeat <= s) && repeat > 0 { + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true)) + } else if repeat-s > 4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + } + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at +2 + // Very small gain + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + } + // Search for a match at best match end, see if that is better. + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 1-2 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + const skipEnd = 1 + if sAt := best.s + best.length - skipEnd; sAt < sLimit { + + sBack := best.s + skipBeginning - skipEnd + backL := best.length - skipBeginning + // Load initial values + cv = load64(src, sBack) + + // Grab candidates... + next := lTable[hash8(load64(src, sAt), lTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + // Disabled: Extremely small gain + if false { + next = sTable[hash4(load64(src, sAt), sTableBits)] + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if !best.rep && !best.dict { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + s += best.length + + if offset > 65535 && s-base <= 5 && !best.rep { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + if debug && nextEmit != base { + fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base) + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if best.rep { + if nextEmit > 0 || best.dict { + if debug { + fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], offset, best.length) + } else { + // First match without dict cannot be a repeat. + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + } else { + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBestSnappy(dst, src []byte) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + + return score - emitCopyNoRepeatSize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv))) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + } + // Search for a match at best match end, see if that is better. + if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if true { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, best.length) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// emitCopySize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopySize(offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeatSize(offset, length) + } + i = 5 + } + if length == 0 { + return i + } + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + if offset < 2048 { + // Emit 8 bytes, then rest as repeats... + return 2 + emitRepeatSize(offset, length-8) + } + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeatSize(offset, length-60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitCopyNoRepeatSize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeatSize(offset, length int) int { + if offset >= 65536 { + return 5 + 5*(length/64) + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + 3*(length/60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitRepeatSize returns the number of bytes required to encode a repeat. +// Length must be at least 4 and < 1<<24 +func emitRepeatSize(offset, length int) int { + // Repeat offset, make length cheaper + if length <= 4+4 || (length < 8+4 && offset < 2048) { + return 2 + } + if length < (1<<8)+4+4 { + return 3 + } + if length < (1<<16)+(1<<8)+4 { + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= (1 << 16) - 4 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + } + if left > 0 { + return 5 + emitRepeatSize(offset, left) + } + return 5 +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go new file mode 100644 index 000000000000..1e30fb731766 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -0,0 +1,1507 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "fmt" + "math/bits" +) + +// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint64, h uint8) uint32 { + const prime4bytes = 2654435761 + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + const prime5bytes = 889523592379 + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + const prime7bytes = 58295818150454627 + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + const prime8bytes = 0xcf1bbcdcb7a56463 + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + // lTable could be postponed, but very minor difference. + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = min(s+(s-nextEmit)>>7+1, s+maxSkip) + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, s-base) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +func encodeBlockBetterGo64K(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + // Initialize the hash tables. + // Use smaller tables for smaller blocks + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 13 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>6 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint16(s) + sTable[hashS] = uint16(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint16(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint16(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint16(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint16(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1) + + // lTable could be postponed, but very minor difference. + lTable[hash7(cv1, lTableBits)] = uint16(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo64K(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + // Use smaller tables for smaller blocks + const ( + // Long hash matches. + lTableBits = 15 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 13 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = min(s+(s-nextEmit)>>6+1, s+maxSkip) + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint16(s) + sTable[hashS] = uint16(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint16(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, s-base) + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint16(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint16(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + + sLimit := min(len(src)-inputMargin, MaxDictSrcOffset-maxAhead) + if len(src) < minNonLiteralBlockSize { + return 0 + } + + dict.initBetter() + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 0 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := len(dict.dict) - dict.repeat + + // While in dict +searchDict: + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + break searchDict + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + dictL := int(dict.betterTableLong[hashL]) + dictS := int(dict.betterTableShort[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if s != 0 { + if cv == valLong { + goto emitMatch + } + if cv == valShort { + candidateL = candidateS + goto emitMatch + } + } + + // Check dict repeat. + if repeat >= s+4 { + candidate := len(dict.dict) - repeat + s + if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + continue + } + } + // Don't try to find match at s==0 + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + goto emitMatch + } + + // Long dict... + if uint32(cv) == load32(dict.dict, dictL) { + candidateL = dictL + goto emitDict + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + // Use our short candidate. + candidateL = candidateS + goto emitMatch + } + if uint32(cv) == load32(dict.dict, dictS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + candidateL = dictS + goto emitDict + } + cv = load64(src, nextS) + s = nextS + } + emitDict: + { + if debug { + if load32(dict.dict, candidateL) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + offset := s + (len(dict.dict)) - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if repeat == offset { + if debug { + fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], offset, s-base) + } else { + // Split to ensure we don't start a copy within next block. + d += emitCopy(dst[d:], offset, 4) + d += emitRepeat(dst[d:], offset, s-base-4) + } + repeat = offset + } + if false { + // Validate match. + if s <= candidateL { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } + } + continue + } + emitMatch: + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + if repeat == offset { + if debug { + fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s) + } + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go new file mode 100644 index 000000000000..e25b78445d76 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -0,0 +1,741 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package s2 + +import ( + "bytes" + "math/bits" +) + +const hasAmd64Asm = false + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlock(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + if len(src) <= 64<<10 { + return encodeBlockGo64K(dst, src) + } + return encodeBlockGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetter(dst, src []byte) (d int) { + if len(src) <= 64<<10 { + return encodeBlockBetterGo64K(dst, src) + } + return encodeBlockBetterGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetterSnappy(dst, src []byte) (d int) { + if len(src) <= 64<<10 { + return encodeBlockBetterSnappyGo64K(dst, src) + } + return encodeBlockBetterSnappyGo(dst, src) +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockSnappy(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + if len(src) <= 64<<10 { + return encodeBlockSnappyGo64K(dst, src) + } + return encodeBlockSnappyGo(dst, src) +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteral(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + const num = 63<<2 | tagLiteral + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat(dst []byte, offset, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopy(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. + dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeat(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. + dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitCopyNoRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitCopyNoRepeat(dst[3:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +func matchLen(a []byte, b []byte) int { + b = b[:len(a)] + var checked int + if len(a) > 4 { + // Try 4 bytes first + if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { + return bits.TrailingZeros32(diff) >> 3 + } + // Switch to 8 byte matching. + checked = 4 + a = a[4:] + b = b[4:] + for len(a) >= 8 { + b = b[:len(a)] + if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) + } + checked += 8 + a = a[8:] + b = b[8:] + } + } + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + return int(i) + checked + } + } + return len(a) + checked +} + +// input must be > inputMargin +func calcBlockSize(src []byte, _ *[32768]byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 13 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +// length must be > inputMargin. +func calcBlockSizeSmall(src []byte, _ *[2048]byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 9 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralSize(lit []byte) int { + if len(lit) == 0 { + return 0 + } + switch { + case len(lit) <= 60: + return len(lit) + 1 + case len(lit) <= 1<<8: + return len(lit) + 2 + case len(lit) <= 1<<16: + return len(lit) + 3 + case len(lit) <= 1<<24: + return len(lit) + 4 + default: + return len(lit) + 5 + } +} + +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockAsm should be unreachable") +} + +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockSnappyAsm should be unreachable") +} + +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockAsm should be unreachable") +} + +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockSnappyAsm should be unreachable") +} diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go new file mode 100644 index 000000000000..f43aa8154355 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go @@ -0,0 +1,228 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +package s2 + +func _dummy_() + +// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int + +// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int + +// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int + +// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int + +// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int + +// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int + +// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int + +// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int + +// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int + +// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int + +// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int + +// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int + +// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int + +// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int + +// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int + +// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int + +// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSize(src []byte, tmp *[32768]byte) int + +// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 1024 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +//go:noescape +func emitLiteral(dst []byte, lit []byte) int + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<32 +// +//go:noescape +func emitRepeat(dst []byte, offset int, length int) int + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopy(dst []byte, offset int, length int) int + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopyNoRepeat(dst []byte, offset int, length int) int + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +// +//go:noescape +func matchLen(a []byte, b []byte) int + +// cvtLZ4Block converts an LZ4 block to S2 +// +//go:noescape +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to S2 +// +//go:noescape +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4Block converts an LZ4 block to Snappy +// +//go:noescape +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to Snappy +// +//go:noescape +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s new file mode 100644 index 000000000000..df9be687be7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -0,0 +1,21303 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func _dummy_() +TEXT ·_dummy_(SB), $0 +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + RET + +// func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm + +repeat_extend_back_loop_encodeBlockAsm: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm + +repeat_extend_back_end_encodeBlockAsm: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 5(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_repeat_emit_encodeBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_repeat_emit_encodeBlockAsm + +four_bytes_repeat_emit_encodeBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_encodeBlockAsm + +three_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm + +two_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm + JMP memmove_long_repeat_emit_encodeBlockAsm + +one_byte_repeat_emit_encodeBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm + +memmove_long_repeat_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm + +matchlen_bsf_16repeat_extend_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_match8_repeat_extend_encodeBlockAsm: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm + +matchlen_bsf_8_repeat_extend_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_match4_repeat_extend_encodeBlockAsm: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm + JB repeat_extend_forward_end_encodeBlockAsm + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm + +matchlen_match1_repeat_extend_encodeBlockAsm: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_repeat_encodeBlockAsm: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm + CMPL R8, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm + +cant_repeat_two_offset_match_repeat_encodeBlockAsm: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm + CMPL SI, $0x00010100 + JB repeat_four_match_repeat_encodeBlockAsm + CMPL SI, $0x0100ffff + JB repeat_five_match_repeat_encodeBlockAsm + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_repeat_encodeBlockAsm + +repeat_five_match_repeat_encodeBlockAsm: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_match_repeat_encodeBlockAsm: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_match_repeat_encodeBlockAsm: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_match_repeat_encodeBlockAsm: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_match_repeat_encodeBlockAsm: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_as_copy_encodeBlockAsm: + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeBlockAsm + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm + MOVB $0xff, (CX) + MOVL DI, 1(CX) + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +four_bytes_remain_repeat_as_copy_encodeBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm + XORL R8, R8 + LEAL -1(R8)(SI*4), SI + MOVB SI, (CX) + MOVL DI, 1(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + MOVL DI, R9 + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +long_offset_short_repeat_as_copy_encodeBlockAsm: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +emit_copy_three_repeat_as_copy_encodeBlockAsm: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm + +no_repeat_found_encodeBlockAsm: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm + +candidate3_match_encodeBlockAsm: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm + +candidate2_match_encodeBlockAsm: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm + +match_extend_back_loop_encodeBlockAsm: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm + JMP match_extend_back_loop_encodeBlockAsm + +match_extend_back_end_encodeBlockAsm: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x00010000 + JB three_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x01000000 + JB four_bytes_match_emit_encodeBlockAsm + MOVB $0xfc, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_encodeBlockAsm + +four_bytes_match_emit_encodeBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW R8, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeBlockAsm + +three_bytes_match_emit_encodeBlockAsm: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm + +two_bytes_match_emit_encodeBlockAsm: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm + JMP memmove_long_match_emit_encodeBlockAsm + +one_byte_match_emit_encodeBlockAsm: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm + +memmove_long_match_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm: +match_nolit_loop_encodeBlockAsm: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm + +matchlen_bsf_16match_nolit_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm + +matchlen_match8_match_nolit_encodeBlockAsm: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm + +matchlen_bsf_8_match_nolit_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm + +matchlen_match4_match_nolit_encodeBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm + JB match_nolit_end_encodeBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm + +matchlen_match1_match_nolit_encodeBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBlockAsm + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_encodeBlockAsm + MOVB $0xff, (CX) + MOVL SI, 1(CX) + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBlockAsm_emit_copy: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +four_bytes_remain_match_nolit_encodeBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm + XORL DI, DI + LEAL -1(DI)(R10*4), R10 + MOVB R10, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_match_nolit_encodeBlockAsm: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + MOVL SI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +long_offset_short_match_nolit_encodeBlockAsm: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_short_match_nolit_encodeBlockAsm: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +emit_copy_three_match_nolit_encodeBlockAsm: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm + INCL DX + JMP search_loop_encodeBlockAsm + +emit_remainder_encodeBlockAsm: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeBlockAsm + MOVB $0xfc, (CX) + MOVL DX, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_encodeBlockAsm + +four_bytes_emit_remainder_encodeBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeBlockAsm + +three_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm + +two_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm + JMP memmove_long_emit_remainder_encodeBlockAsm + +one_byte_emit_remainder_encodeBlockAsm: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm + +memmove_long_emit_remainder_encodeBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm4MB(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm4MB: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm4MB: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm4MB + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm4MB + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm4MB + +repeat_extend_back_loop_encodeBlockAsm4MB: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm4MB + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm4MB + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm4MB + +repeat_extend_back_end_encodeBlockAsm4MB: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 4(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm4MB: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_encodeBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +three_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +two_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm4MB + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +one_byte_repeat_emit_encodeBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm4MB: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB + +memmove_long_repeat_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm4MB: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm4MB + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB + +matchlen_bsf_16repeat_extend_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match8_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm4MB + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB + +matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match4_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm4MB + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm4MB + JB repeat_extend_forward_end_encodeBlockAsm4MB + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match1_repeat_extend_encodeBlockAsm4MB: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm4MB + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm4MB: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm4MB + CMPL R8, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm4MB + +cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm4MB + CMPL SI, $0x00010100 + JB repeat_four_match_repeat_encodeBlockAsm4MB + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_match_repeat_encodeBlockAsm4MB: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_match_repeat_encodeBlockAsm4MB: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_match_repeat_encodeBlockAsm4MB: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_match_repeat_encodeBlockAsm4MB: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_as_copy_encodeBlockAsm4MB: + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + MOVB $0xff, (CX) + MOVL DI, 1(CX) + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm4MB + XORL R8, R8 + LEAL -1(R8)(SI*4), SI + MOVB SI, (CX) + MOVL DI, 1(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +long_offset_short_repeat_as_copy_encodeBlockAsm4MB: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +emit_copy_three_repeat_as_copy_encodeBlockAsm4MB: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm4MB: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm4MB + +no_repeat_found_encodeBlockAsm4MB: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm4MB + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm4MB + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm4MB + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm4MB + +candidate3_match_encodeBlockAsm4MB: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm4MB + +candidate2_match_encodeBlockAsm4MB: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm4MB: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm4MB + +match_extend_back_loop_encodeBlockAsm4MB: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm4MB + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm4MB + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm4MB + JMP match_extend_back_loop_encodeBlockAsm4MB + +match_extend_back_end_encodeBlockAsm4MB: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 4(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm4MB: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00010000 + JB three_bytes_match_emit_encodeBlockAsm4MB + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW R8, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +three_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +two_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm4MB + JMP memmove_long_match_emit_encodeBlockAsm4MB + +one_byte_match_emit_encodeBlockAsm4MB: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm4MB: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm4MB + +memmove_long_match_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm4MB: +match_nolit_loop_encodeBlockAsm4MB: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm4MB + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB + +matchlen_bsf_16match_nolit_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_match8_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm4MB + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm4MB + +matchlen_bsf_8_match_nolit_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_match4_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm4MB + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm4MB + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm4MB + JB match_nolit_end_encodeBlockAsm4MB + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm4MB + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm4MB + +matchlen_match1_match_nolit_encodeBlockAsm4MB: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm4MB + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm4MB: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBlockAsm4MB + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB + MOVB $0xff, (CX) + MOVL SI, 1(CX) + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_encodeBlockAsm4MB + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBlockAsm4MB: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm4MB + XORL DI, DI + LEAL -1(DI)(R10*4), R10 + MOVB R10, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm4MB + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +long_offset_short_match_nolit_encodeBlockAsm4MB: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBlockAsm4MB: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm4MB + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +emit_copy_three_match_nolit_encodeBlockAsm4MB: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm4MB: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm4MB + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm4MB: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm4MB + INCL DX + JMP search_loop_encodeBlockAsm4MB + +emit_remainder_encodeBlockAsm4MB: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 4(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm4MB: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +three_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +two_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +one_byte_emit_remainder_encodeBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm4MB: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm4MB: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB + +memmove_long_emit_remainder_encodeBlockAsm4MB: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm4MB: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm12B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000080, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm12B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm12B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm12B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x18, R11 + IMULQ R9, R11 + SHRQ $0x34, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm12B + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm12B + +repeat_extend_back_loop_encodeBlockAsm12B: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm12B + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm12B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm12B + +repeat_extend_back_end_encodeBlockAsm12B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm12B + JB three_bytes_repeat_emit_encodeBlockAsm12B + +three_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +two_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm12B + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +one_byte_repeat_emit_encodeBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm12B + +memmove_long_repeat_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm12B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm12B + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B + +matchlen_bsf_16repeat_extend_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match8_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm12B + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm12B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match4_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm12B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm12B + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm12B + JB repeat_extend_forward_end_encodeBlockAsm12B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm12B + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match1_repeat_extend_encodeBlockAsm12B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm12B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm12B: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm12B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm12B + CMPL R8, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm12B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm12B + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_match_repeat_encodeBlockAsm12B: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_match_repeat_encodeBlockAsm12B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_match_repeat_encodeBlockAsm12B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_as_copy_encodeBlockAsm12B: + // emitCopy + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +long_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeBlockAsm12B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm12B: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm12B + +no_repeat_found_encodeBlockAsm12B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm12B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm12B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm12B + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm12B + +candidate3_match_encodeBlockAsm12B: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm12B + +candidate2_match_encodeBlockAsm12B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm12B + +match_extend_back_loop_encodeBlockAsm12B: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm12B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm12B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm12B + JMP match_extend_back_loop_encodeBlockAsm12B + +match_extend_back_end_encodeBlockAsm12B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm12B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm12B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm12B + JB three_bytes_match_emit_encodeBlockAsm12B + +three_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm12B + +two_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm12B + JMP memmove_long_match_emit_encodeBlockAsm12B + +one_byte_match_emit_encodeBlockAsm12B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm12B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm12B + +memmove_long_match_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm12B: +match_nolit_loop_encodeBlockAsm12B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm12B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B + +matchlen_bsf_16match_nolit_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_match8_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm12B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_match4_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm12B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm12B + JB match_nolit_end_encodeBlockAsm12B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm12B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm12B + +matchlen_match1_match_nolit_encodeBlockAsm12B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm12B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm12B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm12B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +long_offset_short_match_nolit_encodeBlockAsm12B: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBlockAsm12B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +emit_copy_three_match_nolit_encodeBlockAsm12B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm12B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm12B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x18, R8 + IMULQ R9, R8 + SHRQ $0x34, R8 + SHLQ $0x18, SI + IMULQ R9, SI + SHRQ $0x34, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm12B + INCL DX + JMP search_loop_encodeBlockAsm12B + +emit_remainder_encodeBlockAsm12B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm12B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm12B + JB three_bytes_emit_remainder_encodeBlockAsm12B + +three_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +two_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm12B + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +one_byte_emit_remainder_encodeBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm12B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm12B + +memmove_long_emit_remainder_encodeBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm12B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm10B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000020, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm10B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm10B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x36, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm10B + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm10B + +repeat_extend_back_loop_encodeBlockAsm10B: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm10B + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm10B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm10B + +repeat_extend_back_end_encodeBlockAsm10B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm10B + JB three_bytes_repeat_emit_encodeBlockAsm10B + +three_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +two_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm10B + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +one_byte_repeat_emit_encodeBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm10B + +memmove_long_repeat_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm10B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm10B + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B + +matchlen_bsf_16repeat_extend_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match8_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm10B + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm10B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match4_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm10B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm10B + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm10B + JB repeat_extend_forward_end_encodeBlockAsm10B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm10B + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match1_repeat_extend_encodeBlockAsm10B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm10B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm10B: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm10B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm10B + CMPL R8, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm10B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm10B + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_match_repeat_encodeBlockAsm10B: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_match_repeat_encodeBlockAsm10B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_match_repeat_encodeBlockAsm10B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_as_copy_encodeBlockAsm10B: + // emitCopy + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +long_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeBlockAsm10B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm10B: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm10B + +no_repeat_found_encodeBlockAsm10B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm10B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm10B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm10B + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm10B + +candidate3_match_encodeBlockAsm10B: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm10B + +candidate2_match_encodeBlockAsm10B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm10B + +match_extend_back_loop_encodeBlockAsm10B: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm10B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm10B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm10B + JMP match_extend_back_loop_encodeBlockAsm10B + +match_extend_back_end_encodeBlockAsm10B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm10B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm10B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm10B + JB three_bytes_match_emit_encodeBlockAsm10B + +three_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm10B + +two_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm10B + JMP memmove_long_match_emit_encodeBlockAsm10B + +one_byte_match_emit_encodeBlockAsm10B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm10B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm10B + +memmove_long_match_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm10B: +match_nolit_loop_encodeBlockAsm10B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm10B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B + +matchlen_bsf_16match_nolit_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_match8_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm10B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_match4_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm10B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm10B + JB match_nolit_end_encodeBlockAsm10B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm10B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm10B + +matchlen_match1_match_nolit_encodeBlockAsm10B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm10B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm10B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm10B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +long_offset_short_match_nolit_encodeBlockAsm10B: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBlockAsm10B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +emit_copy_three_match_nolit_encodeBlockAsm10B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm10B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm10B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x36, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x36, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm10B + INCL DX + JMP search_loop_encodeBlockAsm10B + +emit_remainder_encodeBlockAsm10B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm10B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm10B + JB three_bytes_emit_remainder_encodeBlockAsm10B + +three_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +two_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm10B + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +one_byte_emit_remainder_encodeBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm10B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm10B + +memmove_long_emit_remainder_encodeBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm10B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm8B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000008, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm8B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm8B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm8B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x38, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm8B + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm8B + +repeat_extend_back_loop_encodeBlockAsm8B: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm8B + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm8B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm8B + +repeat_extend_back_end_encodeBlockAsm8B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm8B + JB three_bytes_repeat_emit_encodeBlockAsm8B + +three_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +two_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm8B + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +one_byte_repeat_emit_encodeBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm8B + +memmove_long_repeat_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm8B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm8B + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B + +matchlen_bsf_16repeat_extend_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match8_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm8B + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm8B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match4_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm8B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm8B + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm8B + JB repeat_extend_forward_end_encodeBlockAsm8B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm8B + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match1_repeat_extend_encodeBlockAsm8B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm8B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm8B: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm8B + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm8B + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm8B + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_match_repeat_encodeBlockAsm8B: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_match_repeat_encodeBlockAsm8B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_as_copy_encodeBlockAsm8B: + // emitCopy + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +long_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeBlockAsm8B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm8B: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm8B + +no_repeat_found_encodeBlockAsm8B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm8B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm8B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm8B + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm8B + +candidate3_match_encodeBlockAsm8B: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm8B + +candidate2_match_encodeBlockAsm8B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm8B + +match_extend_back_loop_encodeBlockAsm8B: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm8B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm8B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm8B + JMP match_extend_back_loop_encodeBlockAsm8B + +match_extend_back_end_encodeBlockAsm8B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm8B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm8B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm8B + JB three_bytes_match_emit_encodeBlockAsm8B + +three_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm8B + +two_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm8B + JMP memmove_long_match_emit_encodeBlockAsm8B + +one_byte_match_emit_encodeBlockAsm8B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm8B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm8B + +memmove_long_match_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm8B: +match_nolit_loop_encodeBlockAsm8B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm8B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B + +matchlen_bsf_16match_nolit_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_match8_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm8B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_match4_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm8B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm8B + JB match_nolit_end_encodeBlockAsm8B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm8B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm8B + +matchlen_match1_match_nolit_encodeBlockAsm8B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm8B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm8B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm8B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + MOVL R10, SI + LEAL -4(R10), R10 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +long_offset_short_match_nolit_encodeBlockAsm8B: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat + MOVL R10, SI + LEAL -4(R10), R10 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBlockAsm8B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm8B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +emit_copy_three_match_nolit_encodeBlockAsm8B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm8B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm8B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm8B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x38, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x38, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm8B + INCL DX + JMP search_loop_encodeBlockAsm8B + +emit_remainder_encodeBlockAsm8B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm8B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm8B + JB three_bytes_emit_remainder_encodeBlockAsm8B + +three_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +two_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm8B + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +one_byte_emit_remainder_encodeBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm8B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm8B + +memmove_long_emit_remainder_encodeBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm8B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00001200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -6(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBetterBlockAsm: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JBE check_maxskip_ok_encodeBetterBlockAsm + LEAL 100(DX), SI + JMP check_maxskip_cont_encodeBetterBlockAsm + +check_maxskip_ok_encodeBetterBlockAsm: + LEAL 1(DX)(SI*1), SI + +check_maxskip_cont_encodeBetterBlockAsm: + CMPL SI, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL 524288(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 524288(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeBetterBlockAsm + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm + +no_short_found_encodeBetterBlockAsm: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), DX + JMP search_loop_encodeBetterBlockAsm + +candidateS_match_encodeBetterBlockAsm: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm + DECL DX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm + +match_extend_back_loop_encodeBetterBlockAsm: + CMPL DX, DI + JBE match_extend_back_end_encodeBetterBlockAsm + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBetterBlockAsm + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm + JMP match_extend_back_loop_encodeBetterBlockAsm + +match_extend_back_end_encodeBetterBlockAsm: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm + +matchlen_match8_match_nolit_encodeBetterBlockAsm: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm + +matchlen_match4_match_nolit_encodeBetterBlockAsm: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm + JB match_nolit_end_encodeBetterBlockAsm + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm + +matchlen_match1_match_nolit_encodeBetterBlockAsm: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm + CMPL R12, $0x01 + JA match_length_ok_encodeBetterBlockAsm + CMPL R8, $0x0000ffff + JBE match_length_ok_encodeBetterBlockAsm + MOVL 20(SP), DX + INCL DX + JMP search_loop_encodeBetterBlockAsm + +match_length_ok_encodeBetterBlockAsm: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_match_emit_encodeBetterBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_match_emit_encodeBetterBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +four_bytes_match_emit_encodeBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +three_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +two_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm + JMP memmove_long_match_emit_encodeBetterBlockAsm + +one_byte_match_emit_encodeBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm + +memmove_long_match_emit_encodeBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JB two_byte_offset_match_nolit_encodeBetterBlockAsm + CMPL R12, $0x40 + JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm + MOVB $0xff, (CX) + MOVL R8, 1(CX) + LEAL -64(R12), R12 + ADDQ $0x05, CX + CMPL R12, $0x04 + JB four_bytes_remain_match_nolit_encodeBetterBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R12, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy + LEAL -16842747(R12), R12 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +four_bytes_remain_match_nolit_encodeBetterBlockAsm: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm + XORL SI, SI + LEAL -1(SI)(R12*4), R12 + MOVB R12, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_match_nolit_encodeBetterBlockAsm: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + MOVL R8, R9 + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL R12, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + LEAL -16842747(R12), R12 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +long_offset_short_match_nolit_encodeBetterBlockAsm: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R12, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short + LEAL -16842747(R12), R12 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +emit_copy_three_match_nolit_encodeBetterBlockAsm: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +match_is_repeat_encodeBetterBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_match_emit_repeat_encodeBetterBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +four_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +three_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +two_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +one_byte_match_emit_repeat_encodeBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + +memmove_long_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat +emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm: + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm + CMPL R12, $0x0100ffff + JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm + LEAL -16842747(R12), R12 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm + +repeat_five_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 524288(AX)(R11*4) + MOVL R14, 524288(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x08, R11 + IMULQ SI, R11 + SHRQ $0x2f, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm + +emit_remainder_encodeBetterBlockAsm: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeBetterBlockAsm + MOVB $0xfc, (CX) + MOVL DX, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +four_bytes_emit_remainder_encodeBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +three_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +two_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +one_byte_emit_remainder_encodeBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm + +memmove_long_emit_remainder_encodeBetterBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm4MB(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00001200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm4MB: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBetterBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -6(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBetterBlockAsm4MB: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JBE check_maxskip_ok_encodeBetterBlockAsm4MB + LEAL 100(DX), SI + JMP check_maxskip_cont_encodeBetterBlockAsm4MB + +check_maxskip_ok_encodeBetterBlockAsm4MB: + LEAL 1(DX)(SI*1), SI + +check_maxskip_cont_encodeBetterBlockAsm4MB: + CMPL SI, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm4MB + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL 524288(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 524288(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm4MB + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm4MB + +no_short_found_encodeBetterBlockAsm4MB: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), DX + JMP search_loop_encodeBetterBlockAsm4MB + +candidateS_match_encodeBetterBlockAsm4MB: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm4MB + DECL DX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm4MB: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm4MB + +match_extend_back_loop_encodeBetterBlockAsm4MB: + CMPL DX, DI + JBE match_extend_back_end_encodeBetterBlockAsm4MB + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBetterBlockAsm4MB + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm4MB + JMP match_extend_back_loop_encodeBetterBlockAsm4MB + +match_extend_back_end_encodeBetterBlockAsm4MB: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 4(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm4MB: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match8_match_nolit_encodeBetterBlockAsm4MB: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match4_match_nolit_encodeBetterBlockAsm4MB: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm4MB: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + JB match_nolit_end_encodeBetterBlockAsm4MB + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match1_match_nolit_encodeBetterBlockAsm4MB: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm4MB + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm4MB: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm4MB + CMPL R12, $0x01 + JA match_length_ok_encodeBetterBlockAsm4MB + CMPL R8, $0x0000ffff + JBE match_length_ok_encodeBetterBlockAsm4MB + MOVL 20(SP), DX + INCL DX + JMP search_loop_encodeBetterBlockAsm4MB + +match_length_ok_encodeBetterBlockAsm4MB: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm4MB + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm4MB + CMPL SI, $0x00010000 + JB three_bytes_match_emit_encodeBetterBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +three_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +two_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +one_byte_match_emit_encodeBetterBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm4MB: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB + +memmove_long_match_emit_encodeBetterBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm4MB: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB + CMPL R12, $0x40 + JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + MOVB $0xff, (CX) + MOVL R8, 1(CX) + LEAL -64(R12), R12 + ADDQ $0x05, CX + CMPL R12, $0x04 + JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + XORL SI, SI + LEAL -1(SI)(R12*4), R12 + MOVB R12, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_match_nolit_encodeBetterBlockAsm4MB: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +long_offset_short_match_nolit_encodeBetterBlockAsm4MB: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +emit_copy_three_match_nolit_encodeBetterBlockAsm4MB: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +match_is_repeat_encodeBetterBlockAsm4MB: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x00010000 + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +one_byte_match_emit_repeat_encodeBetterBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + +memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R12, $0x00010100 + JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB + LEAL -65536(R12), R12 + MOVL R12, R8 + MOVW $0x001d, (CX) + MOVW R12, 2(CX) + SARL $0x10, R8 + MOVB R8, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm4MB + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm4MB: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 524288(AX)(R11*4) + MOVL R14, 524288(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm4MB: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm4MB + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x08, R11 + IMULQ SI, R11 + SHRQ $0x2f, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm4MB + +emit_remainder_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 4(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +three_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +two_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +one_byte_emit_remainder_encodeBetterBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + +memmove_long_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm12B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000280, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm12B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -6(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBetterBlockAsm12B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm12B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL (AX)(R10*4), SI + MOVL 65536(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 65536(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm12B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm12B + +no_short_found_encodeBetterBlockAsm12B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), DX + JMP search_loop_encodeBetterBlockAsm12B + +candidateS_match_encodeBetterBlockAsm12B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm12B + DECL DX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm12B + +match_extend_back_loop_encodeBetterBlockAsm12B: + CMPL DX, DI + JBE match_extend_back_end_encodeBetterBlockAsm12B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBetterBlockAsm12B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm12B + JMP match_extend_back_loop_encodeBetterBlockAsm12B + +match_extend_back_end_encodeBetterBlockAsm12B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm12B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match8_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + JB match_nolit_end_encodeBetterBlockAsm12B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeBetterBlockAsm12B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm12B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm12B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm12B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm12B + JB three_bytes_match_emit_encodeBetterBlockAsm12B + +three_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +two_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +one_byte_match_emit_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B + +memmove_long_match_emit_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm12B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +long_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeBetterBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +match_is_repeat_encodeBetterBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +one_byte_match_emit_repeat_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm12B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm12B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 65536(AX)(R11*4) + MOVL R14, 65536(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm12B: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm12B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm12B + +emit_remainder_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeBetterBlockAsm12B + +three_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +two_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +one_byte_emit_remainder_encodeBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + +memmove_long_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm12B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm10B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x000000a0, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm10B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -6(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBetterBlockAsm10B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm10B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL (AX)(R10*4), SI + MOVL 16384(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 16384(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm10B + +no_short_found_encodeBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), DX + JMP search_loop_encodeBetterBlockAsm10B + +candidateS_match_encodeBetterBlockAsm10B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm10B + DECL DX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + +match_extend_back_loop_encodeBetterBlockAsm10B: + CMPL DX, DI + JBE match_extend_back_end_encodeBetterBlockAsm10B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBetterBlockAsm10B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + JMP match_extend_back_loop_encodeBetterBlockAsm10B + +match_extend_back_end_encodeBetterBlockAsm10B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm10B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match8_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + JB match_nolit_end_encodeBetterBlockAsm10B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeBetterBlockAsm10B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm10B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm10B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm10B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm10B + JB three_bytes_match_emit_encodeBetterBlockAsm10B + +three_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +two_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +one_byte_match_emit_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B + +memmove_long_match_emit_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm10B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +long_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeBetterBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +match_is_repeat_encodeBetterBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +one_byte_match_emit_repeat_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm10B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm10B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 16384(AX)(R11*4) + MOVL R14, 16384(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm10B: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm10B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm10B + +emit_remainder_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeBetterBlockAsm10B + +three_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +two_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +one_byte_emit_remainder_encodeBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + +memmove_long_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm10B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm8B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000028, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm8B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -6(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBetterBlockAsm8B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL (AX)(R10*4), SI + MOVL 4096(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 4096(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm8B + +no_short_found_encodeBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), DX + JMP search_loop_encodeBetterBlockAsm8B + +candidateS_match_encodeBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm8B + DECL DX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + +match_extend_back_loop_encodeBetterBlockAsm8B: + CMPL DX, DI + JBE match_extend_back_end_encodeBetterBlockAsm8B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBetterBlockAsm8B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + JMP match_extend_back_loop_encodeBetterBlockAsm8B + +match_extend_back_end_encodeBetterBlockAsm8B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm8B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match8_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + JB match_nolit_end_encodeBetterBlockAsm8B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeBetterBlockAsm8B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm8B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm8B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm8B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm8B + JB three_bytes_match_emit_encodeBetterBlockAsm8B + +three_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +two_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +one_byte_match_emit_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B + +memmove_long_match_emit_encodeBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm8B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +long_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeBetterBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +match_is_repeat_encodeBetterBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +one_byte_match_emit_repeat_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (CX) + MOVL R9, -4(CX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R11 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm8B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 4096(AX)(R11*4) + MOVL R14, 4096(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm8B: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm8B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm8B + +emit_remainder_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeBetterBlockAsm8B + +three_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +two_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +one_byte_emit_remainder_encodeBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + +memmove_long_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm8B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBlockAsm: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm + +repeat_extend_back_loop_encodeSnappyBlockAsm: + CMPL DI, SI + JBE repeat_extend_back_end_encodeSnappyBlockAsm + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeSnappyBlockAsm + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm + +repeat_extend_back_end_encodeSnappyBlockAsm: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 5(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_repeat_emit_encodeSnappyBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +four_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVL SI, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +three_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +two_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +one_byte_repeat_emit_encodeSnappyBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm + +memmove_long_repeat_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + JB repeat_extend_forward_end_encodeSnappyBlockAsm + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xff, (CX) + MOVL DI, 1(CX) + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeSnappyBlockAsm + XORL R8, R8 + LEAL -1(R8)(SI*4), SI + MOVB SI, (CX) + MOVL DI, 1(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeSnappyBlockAsm + +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeSnappyBlockAsm + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeSnappyBlockAsm: + MOVL DX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm + +no_repeat_found_encodeSnappyBlockAsm: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBlockAsm + +candidate3_match_encodeSnappyBlockAsm: + ADDL $0x02, DX + JMP candidate_match_encodeSnappyBlockAsm + +candidate2_match_encodeSnappyBlockAsm: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm + +match_extend_back_loop_encodeSnappyBlockAsm: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBlockAsm + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBlockAsm + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm + JMP match_extend_back_loop_encodeSnappyBlockAsm + +match_extend_back_end_encodeSnappyBlockAsm: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBlockAsm + MOVB $0xfc, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +four_bytes_match_emit_encodeSnappyBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW R8, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +three_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +two_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +one_byte_match_emit_encodeSnappyBlockAsm: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm + +memmove_long_match_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm: +match_nolit_loop_encodeSnappyBlockAsm: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_match8_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm + JB match_nolit_end_encodeSnappyBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm + +matchlen_match1_match_nolit_encodeSnappyBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm + MOVB $0xff, (CX) + MOVL SI, 1(CX) + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm + XORL DI, DI + LEAL -1(DI)(R10*4), R10 + MOVB R10, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBlockAsm: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm + INCL DX + JMP search_loop_encodeSnappyBlockAsm + +emit_remainder_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBlockAsm + MOVB $0xfc, (CX) + MOVL DX, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +four_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +three_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +two_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +one_byte_emit_remainder_encodeSnappyBlockAsm: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm + +memmove_long_emit_remainder_encodeSnappyBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm64K(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm64K: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBlockAsm64K: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm64K + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm64K + +repeat_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL DI, SI + JBE repeat_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K + +repeat_extend_back_end_encodeSnappyBlockAsm64K: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm64K: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K + JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K + +three_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +two_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm64K + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +one_byte_repeat_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + +memmove_long_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + JB repeat_extend_forward_end_encodeSnappyBlockAsm64K + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm64K: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeSnappyBlockAsm64K + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeSnappyBlockAsm64K: + MOVL DX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm64K + +no_repeat_found_encodeSnappyBlockAsm64K: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm64K + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm64K + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm64K + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBlockAsm64K + +candidate3_match_encodeSnappyBlockAsm64K: + ADDL $0x02, DX + JMP candidate_match_encodeSnappyBlockAsm64K + +candidate2_match_encodeSnappyBlockAsm64K: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + +match_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBlockAsm64K + +match_extend_back_end_encodeSnappyBlockAsm64K: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm64K: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm64K + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBlockAsm64K + +three_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +two_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +one_byte_match_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K + +memmove_long_match_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm64K: +match_nolit_loop_encodeSnappyBlockAsm64K: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match8_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + JB match_nolit_end_encodeSnappyBlockAsm64K + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBlockAsm64K: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm64K + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm64K: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm64K: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm64K + INCL DX + JMP search_loop_encodeSnappyBlockAsm64K + +emit_remainder_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm12B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000080, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm12B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBlockAsm12B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm12B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x18, R11 + IMULQ R9, R11 + SHRQ $0x34, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm12B + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm12B + +repeat_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL DI, SI + JBE repeat_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B + +repeat_extend_back_end_encodeSnappyBlockAsm12B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B + +three_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +two_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm12B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +one_byte_repeat_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + +memmove_long_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + JB repeat_extend_forward_end_encodeSnappyBlockAsm12B + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm12B: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeSnappyBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeSnappyBlockAsm12B: + MOVL DX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm12B + +no_repeat_found_encodeSnappyBlockAsm12B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm12B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm12B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm12B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBlockAsm12B + +candidate3_match_encodeSnappyBlockAsm12B: + ADDL $0x02, DX + JMP candidate_match_encodeSnappyBlockAsm12B + +candidate2_match_encodeSnappyBlockAsm12B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm12B + +match_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBlockAsm12B + +match_extend_back_end_encodeSnappyBlockAsm12B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm12B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm12B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm12B + JB three_bytes_match_emit_encodeSnappyBlockAsm12B + +three_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +two_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +one_byte_match_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBlockAsm12B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm12B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B + +memmove_long_match_emit_encodeSnappyBlockAsm12B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm12B: +match_nolit_loop_encodeSnappyBlockAsm12B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match8_match_nolit_encodeSnappyBlockAsm12B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + JB match_nolit_end_encodeSnappyBlockAsm12B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm12B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm12B + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm12B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm12B: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm12B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm12B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm12B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x18, R8 + IMULQ R9, R8 + SHRQ $0x34, R8 + SHLQ $0x18, SI + IMULQ R9, SI + SHRQ $0x34, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm12B + INCL DX + JMP search_loop_encodeSnappyBlockAsm12B + +emit_remainder_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B + +three_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm10B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000020, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm10B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBlockAsm10B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm10B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x36, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm10B + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm10B + +repeat_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL DI, SI + JBE repeat_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B + +repeat_extend_back_end_encodeSnappyBlockAsm10B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B + +three_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +two_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm10B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +one_byte_repeat_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + +memmove_long_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + JB repeat_extend_forward_end_encodeSnappyBlockAsm10B + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm10B: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeSnappyBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeSnappyBlockAsm10B: + MOVL DX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm10B + +no_repeat_found_encodeSnappyBlockAsm10B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm10B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm10B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm10B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBlockAsm10B + +candidate3_match_encodeSnappyBlockAsm10B: + ADDL $0x02, DX + JMP candidate_match_encodeSnappyBlockAsm10B + +candidate2_match_encodeSnappyBlockAsm10B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm10B + +match_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBlockAsm10B + +match_extend_back_end_encodeSnappyBlockAsm10B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm10B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm10B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm10B + JB three_bytes_match_emit_encodeSnappyBlockAsm10B + +three_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +two_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +one_byte_match_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBlockAsm10B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm10B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B + +memmove_long_match_emit_encodeSnappyBlockAsm10B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm10B: +match_nolit_loop_encodeSnappyBlockAsm10B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_match8_match_nolit_encodeSnappyBlockAsm10B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + JB match_nolit_end_encodeSnappyBlockAsm10B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm10B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm10B + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm10B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm10B: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm10B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm10B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm10B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm10B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x36, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x36, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm10B + INCL DX + JMP search_loop_encodeSnappyBlockAsm10B + +emit_remainder_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm10B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B + +three_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm8B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000008, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm8B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBlockAsm8B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm8B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x38, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm8B + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm8B + +repeat_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL DI, SI + JBE repeat_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B + +repeat_extend_back_end_encodeSnappyBlockAsm8B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B + +three_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +two_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm8B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +one_byte_repeat_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + +memmove_long_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + JB repeat_extend_forward_end_encodeSnappyBlockAsm8B + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm8B: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeSnappyBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeSnappyBlockAsm8B: + MOVL DX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm8B + +no_repeat_found_encodeSnappyBlockAsm8B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm8B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm8B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm8B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBlockAsm8B + +candidate3_match_encodeSnappyBlockAsm8B: + ADDL $0x02, DX + JMP candidate_match_encodeSnappyBlockAsm8B + +candidate2_match_encodeSnappyBlockAsm8B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm8B + +match_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBlockAsm8B + +match_extend_back_end_encodeSnappyBlockAsm8B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm8B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm8B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm8B + JB three_bytes_match_emit_encodeSnappyBlockAsm8B + +three_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +two_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +one_byte_match_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBlockAsm8B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm8B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B + +memmove_long_match_emit_encodeSnappyBlockAsm8B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm8B: +match_nolit_loop_encodeSnappyBlockAsm8B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm8B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match8_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + JB match_nolit_end_encodeSnappyBlockAsm8B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm8B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm8B + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm8B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm8B: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm8B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm8B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm8B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm8B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x38, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x38, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm8B + INCL DX + JMP search_loop_encodeSnappyBlockAsm8B + +emit_remainder_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00001200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JBE check_maxskip_ok_encodeSnappyBetterBlockAsm + LEAL 100(DX), SI + JMP check_maxskip_cont_encodeSnappyBetterBlockAsm + +check_maxskip_ok_encodeSnappyBetterBlockAsm: + LEAL 1(DX)(SI*1), SI + +check_maxskip_cont_encodeSnappyBetterBlockAsm: + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL 524288(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 524288(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm + +no_short_found_encodeSnappyBetterBlockAsm: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm + +candidateS_match_encodeSnappyBetterBlockAsm: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + +match_extend_back_loop_encodeSnappyBetterBlockAsm: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm + +match_extend_back_end_encodeSnappyBetterBlockAsm: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + JB match_nolit_end_encodeSnappyBetterBlockAsm + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL R12, $0x01 + JA match_length_ok_encodeSnappyBetterBlockAsm + CMPL R8, $0x0000ffff + JBE match_length_ok_encodeSnappyBetterBlockAsm + MOVL 20(SP), DX + INCL DX + JMP search_loop_encodeSnappyBetterBlockAsm + +match_length_ok_encodeSnappyBetterBlockAsm: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBetterBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +four_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +three_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +two_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +one_byte_match_emit_encodeSnappyBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + +memmove_long_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xff, (CX) + MOVL R8, 1(CX) + LEAL -64(R12), R12 + ADDQ $0x05, CX + CMPL R12, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + XORL SI, SI + LEAL -1(SI)(R12*4), R12 + MOVB R12, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 524288(AX)(R11*4) + MOVL R14, 524288(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x08, R11 + IMULQ SI, R11 + SHRQ $0x2f, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm + +emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm + MOVB $0xfc, (CX) + MOVL DX, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +four_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm64K(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000900, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm64K: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm64K: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x33, R11 + MOVL (AX)(R10*4), SI + MOVL 262144(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 262144(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm64K + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm64K + +no_short_found_encodeSnappyBetterBlockAsm64K: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm64K + +candidateS_match_encodeSnappyBetterBlockAsm64K: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + +match_extend_back_loop_encodeSnappyBetterBlockAsm64K: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K + +match_extend_back_end_encodeSnappyBetterBlockAsm64K: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm64K: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + JB match_nolit_end_encodeSnappyBetterBlockAsm64K + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm64K + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm64K: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm64K + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K + +three_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +two_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +one_byte_match_emit_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + +memmove_long_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x33, R11 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x30, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x33, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 262144(AX)(R11*4) + MOVL R14, 262144(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm64K: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm64K + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R11 + IMULQ SI, R11 + SHRQ $0x30, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm64K + +emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm12B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000280, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm12B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm12B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL (AX)(R10*4), SI + MOVL 65536(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 65536(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm12B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm12B + +no_short_found_encodeSnappyBetterBlockAsm12B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm12B + +candidateS_match_encodeSnappyBetterBlockAsm12B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + +match_extend_back_loop_encodeSnappyBetterBlockAsm12B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B + +match_extend_back_end_encodeSnappyBetterBlockAsm12B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm12B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + JB match_nolit_end_encodeSnappyBetterBlockAsm12B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm12B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm12B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +one_byte_match_emit_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 65536(AX)(R11*4) + MOVL R14, 65536(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm12B: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm12B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm12B + +emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm10B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x000000a0, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm10B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm10B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL (AX)(R10*4), SI + MOVL 16384(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 16384(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm10B + +no_short_found_encodeSnappyBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm10B + +candidateS_match_encodeSnappyBetterBlockAsm10B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + +match_extend_back_loop_encodeSnappyBetterBlockAsm10B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B + +match_extend_back_end_encodeSnappyBetterBlockAsm10B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm10B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + JB match_nolit_end_encodeSnappyBetterBlockAsm10B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm10B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm10B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +one_byte_match_emit_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 16384(AX)(R11*4) + MOVL R14, 16384(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm10B: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm10B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm10B + +emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm8B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000028, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm8B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm8B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL (AX)(R10*4), SI + MOVL 4096(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 4096(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm8B + +no_short_found_encodeSnappyBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm8B + +candidateS_match_encodeSnappyBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + +match_extend_back_loop_encodeSnappyBetterBlockAsm8B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B + +match_extend_back_end_encodeSnappyBetterBlockAsm8B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm8B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + JB match_nolit_end_encodeSnappyBetterBlockAsm8B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm8B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm8B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +one_byte_match_emit_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 4096(AX)(R11*4) + MOVL R14, 4096(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm8B: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm8B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm8B + +emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func calcBlockSize(src []byte, tmp *[32768]byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSize(SB), $24-40 + MOVQ tmp+24(FP), AX + XORQ CX, CX + MOVQ $0x00000100, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_calcBlockSize: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_calcBlockSize + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+0(FP), BX + +search_loop_calcBlockSize: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x33, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x33, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x33, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_calcBlockSize + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_calcBlockSize + +repeat_extend_back_loop_calcBlockSize: + CMPL DI, SI + JBE repeat_extend_back_end_calcBlockSize + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_calcBlockSize + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_calcBlockSize + +repeat_extend_back_end_calcBlockSize: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 5(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +repeat_dst_size_check_calcBlockSize: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_calcBlockSize + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_calcBlockSize + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_calcBlockSize + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_calcBlockSize + CMPL SI, $0x01000000 + JB four_bytes_repeat_emit_calcBlockSize + ADDQ $0x05, CX + JMP memmove_long_repeat_emit_calcBlockSize + +four_bytes_repeat_emit_calcBlockSize: + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_calcBlockSize + +three_bytes_repeat_emit_calcBlockSize: + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_calcBlockSize + +two_bytes_repeat_emit_calcBlockSize: + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_calcBlockSize + JMP memmove_long_repeat_emit_calcBlockSize + +one_byte_repeat_emit_calcBlockSize: + ADDQ $0x01, CX + +memmove_repeat_emit_calcBlockSize: + LEAQ (CX)(R8*1), CX + JMP emit_literal_done_repeat_emit_calcBlockSize + +memmove_long_repeat_emit_calcBlockSize: + LEAQ (CX)(R8*1), CX + +emit_literal_done_repeat_emit_calcBlockSize: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+8(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_calcBlockSize: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_calcBlockSize + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSize + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_calcBlockSize + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_calcBlockSize + +matchlen_bsf_16repeat_extend_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_calcBlockSize + +matchlen_match8_repeat_extend_calcBlockSize: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSize + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSize + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_calcBlockSize + +matchlen_bsf_8_repeat_extend_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_calcBlockSize + +matchlen_match4_repeat_extend_calcBlockSize: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSize + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_calcBlockSize + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_calcBlockSize: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_calcBlockSize + JB repeat_extend_forward_end_calcBlockSize + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_calcBlockSize + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_calcBlockSize + +matchlen_match1_repeat_extend_calcBlockSize: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_calcBlockSize + LEAL 1(R11), R11 + +repeat_extend_forward_end_calcBlockSize: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_calcBlockSize + +four_bytes_loop_back_repeat_as_copy_calcBlockSize: + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_calcBlockSize + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_calcBlockSize + JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize + +four_bytes_remain_repeat_as_copy_calcBlockSize: + TESTL SI, SI + JZ repeat_end_emit_calcBlockSize + XORL SI, SI + ADDQ $0x05, CX + JMP repeat_end_emit_calcBlockSize + +two_byte_offset_repeat_as_copy_calcBlockSize: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSize + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_calcBlockSize + +two_byte_offset_short_repeat_as_copy_calcBlockSize: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSize + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_calcBlockSize + ADDQ $0x02, CX + JMP repeat_end_emit_calcBlockSize + +emit_copy_three_repeat_as_copy_calcBlockSize: + ADDQ $0x03, CX + +repeat_end_emit_calcBlockSize: + MOVL DX, 12(SP) + JMP search_loop_calcBlockSize + +no_repeat_found_calcBlockSize: + CMPL (BX)(SI*1), DI + JEQ candidate_match_calcBlockSize + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_calcBlockSize + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_calcBlockSize + MOVL 20(SP), DX + JMP search_loop_calcBlockSize + +candidate3_match_calcBlockSize: + ADDL $0x02, DX + JMP candidate_match_calcBlockSize + +candidate2_match_calcBlockSize: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_calcBlockSize: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_calcBlockSize + +match_extend_back_loop_calcBlockSize: + CMPL DX, DI + JBE match_extend_back_end_calcBlockSize + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_calcBlockSize + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_calcBlockSize + JMP match_extend_back_loop_calcBlockSize + +match_extend_back_end_calcBlockSize: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +match_dst_size_check_calcBlockSize: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_calcBlockSize + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), DI + CMPL DI, $0x3c + JB one_byte_match_emit_calcBlockSize + CMPL DI, $0x00000100 + JB two_bytes_match_emit_calcBlockSize + CMPL DI, $0x00010000 + JB three_bytes_match_emit_calcBlockSize + CMPL DI, $0x01000000 + JB four_bytes_match_emit_calcBlockSize + ADDQ $0x05, CX + JMP memmove_long_match_emit_calcBlockSize + +four_bytes_match_emit_calcBlockSize: + ADDQ $0x04, CX + JMP memmove_long_match_emit_calcBlockSize + +three_bytes_match_emit_calcBlockSize: + ADDQ $0x03, CX + JMP memmove_long_match_emit_calcBlockSize + +two_bytes_match_emit_calcBlockSize: + ADDQ $0x02, CX + CMPL DI, $0x40 + JB memmove_match_emit_calcBlockSize + JMP memmove_long_match_emit_calcBlockSize + +one_byte_match_emit_calcBlockSize: + ADDQ $0x01, CX + +memmove_match_emit_calcBlockSize: + LEAQ (CX)(R9*1), CX + JMP emit_literal_done_match_emit_calcBlockSize + +memmove_long_match_emit_calcBlockSize: + LEAQ (CX)(R9*1), CX + +emit_literal_done_match_emit_calcBlockSize: +match_nolit_loop_calcBlockSize: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+8(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_calcBlockSize: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_calcBlockSize + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSize + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_calcBlockSize + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_calcBlockSize + +matchlen_bsf_16match_nolit_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_calcBlockSize + +matchlen_match8_match_nolit_calcBlockSize: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSize + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSize + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_calcBlockSize + +matchlen_bsf_8_match_nolit_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_calcBlockSize + +matchlen_match4_match_nolit_calcBlockSize: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSize + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_calcBlockSize + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_calcBlockSize: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSize + JB match_nolit_end_calcBlockSize + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_calcBlockSize + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_calcBlockSize + +matchlen_match1_match_nolit_calcBlockSize: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_calcBlockSize + LEAL 1(R10), R10 + +match_nolit_end_calcBlockSize: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_calcBlockSize + +four_bytes_loop_back_match_nolit_calcBlockSize: + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_calcBlockSize + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_calcBlockSize + JMP four_bytes_loop_back_match_nolit_calcBlockSize + +four_bytes_remain_match_nolit_calcBlockSize: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_calcBlockSize + XORL SI, SI + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_calcBlockSize + +two_byte_offset_match_nolit_calcBlockSize: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSize + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_calcBlockSize + +two_byte_offset_short_match_nolit_calcBlockSize: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_calcBlockSize + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_calcBlockSize + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_calcBlockSize + +emit_copy_three_match_nolit_calcBlockSize: + ADDQ $0x03, CX + +match_nolit_emitcopy_end_calcBlockSize: + CMPL DX, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +match_nolit_dst_ok_calcBlockSize: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x33, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x33, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_calcBlockSize + INCL DX + JMP search_loop_calcBlockSize + +emit_remainder_calcBlockSize: + MOVQ src_len+8(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +emit_remainder_ok_calcBlockSize: + MOVQ src_len+8(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_calcBlockSize + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), AX + CMPL AX, $0x3c + JB one_byte_emit_remainder_calcBlockSize + CMPL AX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSize + CMPL AX, $0x00010000 + JB three_bytes_emit_remainder_calcBlockSize + CMPL AX, $0x01000000 + JB four_bytes_emit_remainder_calcBlockSize + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_calcBlockSize + +four_bytes_emit_remainder_calcBlockSize: + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_calcBlockSize + +three_bytes_emit_remainder_calcBlockSize: + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_calcBlockSize + +two_bytes_emit_remainder_calcBlockSize: + ADDQ $0x02, CX + CMPL AX, $0x40 + JB memmove_emit_remainder_calcBlockSize + JMP memmove_long_emit_remainder_calcBlockSize + +one_byte_emit_remainder_calcBlockSize: + ADDQ $0x01, CX + +memmove_emit_remainder_calcBlockSize: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + JMP emit_literal_done_emit_remainder_calcBlockSize + +memmove_long_emit_remainder_calcBlockSize: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + +emit_literal_done_emit_remainder_calcBlockSize: + MOVQ CX, ret+32(FP) + RET + +// func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSizeSmall(SB), $24-40 + MOVQ tmp+24(FP), AX + XORQ CX, CX + MOVQ $0x00000010, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_calcBlockSizeSmall: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_calcBlockSizeSmall + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+0(FP), BX + +search_loop_calcBlockSizeSmall: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x37, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x37, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x37, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_calcBlockSizeSmall + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_calcBlockSizeSmall + +repeat_extend_back_loop_calcBlockSizeSmall: + CMPL DI, SI + JBE repeat_extend_back_end_calcBlockSizeSmall + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_calcBlockSizeSmall + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_calcBlockSizeSmall + +repeat_extend_back_end_calcBlockSizeSmall: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +repeat_dst_size_check_calcBlockSizeSmall: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_calcBlockSizeSmall + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_calcBlockSizeSmall + JB three_bytes_repeat_emit_calcBlockSizeSmall + +three_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +two_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_calcBlockSizeSmall + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +one_byte_repeat_emit_calcBlockSizeSmall: + ADDQ $0x01, CX + +memmove_repeat_emit_calcBlockSizeSmall: + LEAQ (CX)(R8*1), CX + JMP emit_literal_done_repeat_emit_calcBlockSizeSmall + +memmove_long_repeat_emit_calcBlockSizeSmall: + LEAQ (CX)(R8*1), CX + +emit_literal_done_repeat_emit_calcBlockSizeSmall: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+8(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_calcBlockSizeSmall + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_calcBlockSizeSmall + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_calcBlockSizeSmall + +matchlen_bsf_16repeat_extend_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match8_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSizeSmall + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_calcBlockSizeSmall + +matchlen_bsf_8_repeat_extend_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match4_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSizeSmall + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_calcBlockSizeSmall + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_calcBlockSizeSmall + JB repeat_extend_forward_end_calcBlockSizeSmall + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_calcBlockSizeSmall + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match1_repeat_extend_calcBlockSizeSmall: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_calcBlockSizeSmall + LEAL 1(R11), R11 + +repeat_extend_forward_end_calcBlockSizeSmall: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_calcBlockSizeSmall: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall + +two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall: + MOVL SI, DI + SHLL $0x02, DI + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall + ADDQ $0x02, CX + JMP repeat_end_emit_calcBlockSizeSmall + +emit_copy_three_repeat_as_copy_calcBlockSizeSmall: + ADDQ $0x03, CX + +repeat_end_emit_calcBlockSizeSmall: + MOVL DX, 12(SP) + JMP search_loop_calcBlockSizeSmall + +no_repeat_found_calcBlockSizeSmall: + CMPL (BX)(SI*1), DI + JEQ candidate_match_calcBlockSizeSmall + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_calcBlockSizeSmall + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_calcBlockSizeSmall + MOVL 20(SP), DX + JMP search_loop_calcBlockSizeSmall + +candidate3_match_calcBlockSizeSmall: + ADDL $0x02, DX + JMP candidate_match_calcBlockSizeSmall + +candidate2_match_calcBlockSizeSmall: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_calcBlockSizeSmall: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_calcBlockSizeSmall + +match_extend_back_loop_calcBlockSizeSmall: + CMPL DX, DI + JBE match_extend_back_end_calcBlockSizeSmall + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_calcBlockSizeSmall + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_calcBlockSizeSmall + JMP match_extend_back_loop_calcBlockSizeSmall + +match_extend_back_end_calcBlockSizeSmall: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +match_dst_size_check_calcBlockSizeSmall: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_calcBlockSizeSmall + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), DI + CMPL DI, $0x3c + JB one_byte_match_emit_calcBlockSizeSmall + CMPL DI, $0x00000100 + JB two_bytes_match_emit_calcBlockSizeSmall + JB three_bytes_match_emit_calcBlockSizeSmall + +three_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x03, CX + JMP memmove_long_match_emit_calcBlockSizeSmall + +two_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x02, CX + CMPL DI, $0x40 + JB memmove_match_emit_calcBlockSizeSmall + JMP memmove_long_match_emit_calcBlockSizeSmall + +one_byte_match_emit_calcBlockSizeSmall: + ADDQ $0x01, CX + +memmove_match_emit_calcBlockSizeSmall: + LEAQ (CX)(R9*1), CX + JMP emit_literal_done_match_emit_calcBlockSizeSmall + +memmove_long_match_emit_calcBlockSizeSmall: + LEAQ (CX)(R9*1), CX + +emit_literal_done_match_emit_calcBlockSizeSmall: +match_nolit_loop_calcBlockSizeSmall: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+8(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_calcBlockSizeSmall: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_calcBlockSizeSmall + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_calcBlockSizeSmall + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_calcBlockSizeSmall + +matchlen_bsf_16match_nolit_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_match8_match_nolit_calcBlockSizeSmall: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSizeSmall + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_calcBlockSizeSmall + +matchlen_bsf_8_match_nolit_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_match4_match_nolit_calcBlockSizeSmall: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSizeSmall + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_calcBlockSizeSmall + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_calcBlockSizeSmall: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSizeSmall + JB match_nolit_end_calcBlockSizeSmall + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_calcBlockSizeSmall + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_calcBlockSizeSmall + +matchlen_match1_match_nolit_calcBlockSizeSmall: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_calcBlockSizeSmall + LEAL 1(R10), R10 + +match_nolit_end_calcBlockSizeSmall: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_calcBlockSizeSmall: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_calcBlockSizeSmall + +two_byte_offset_short_match_nolit_calcBlockSizeSmall: + MOVL R10, SI + SHLL $0x02, SI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_calcBlockSizeSmall + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_calcBlockSizeSmall + +emit_copy_three_match_nolit_calcBlockSizeSmall: + ADDQ $0x03, CX + +match_nolit_emitcopy_end_calcBlockSizeSmall: + CMPL DX, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +match_nolit_dst_ok_calcBlockSizeSmall: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x37, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x37, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_calcBlockSizeSmall + INCL DX + JMP search_loop_calcBlockSizeSmall + +emit_remainder_calcBlockSizeSmall: + MOVQ src_len+8(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +emit_remainder_ok_calcBlockSizeSmall: + MOVQ src_len+8(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), AX + CMPL AX, $0x3c + JB one_byte_emit_remainder_calcBlockSizeSmall + CMPL AX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSizeSmall + JB three_bytes_emit_remainder_calcBlockSizeSmall + +three_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +two_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x02, CX + CMPL AX, $0x40 + JB memmove_emit_remainder_calcBlockSizeSmall + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +one_byte_emit_remainder_calcBlockSizeSmall: + ADDQ $0x01, CX + +memmove_emit_remainder_calcBlockSizeSmall: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + JMP emit_literal_done_emit_remainder_calcBlockSizeSmall + +memmove_long_emit_remainder_calcBlockSizeSmall: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + +emit_literal_done_emit_remainder_calcBlockSizeSmall: + MOVQ CX, ret+32(FP) + RET + +// func emitLiteral(dst []byte, lit []byte) int +// Requires: SSE2 +TEXT ·emitLiteral(SB), NOSPLIT, $0-56 + MOVQ lit_len+32(FP), DX + MOVQ dst_base+0(FP), AX + MOVQ lit_base+24(FP), CX + TESTQ DX, DX + JZ emit_literal_end_standalone_skip + MOVL DX, BX + LEAL -1(DX), SI + CMPL SI, $0x3c + JB one_byte_standalone + CMPL SI, $0x00000100 + JB two_bytes_standalone + CMPL SI, $0x00010000 + JB three_bytes_standalone + CMPL SI, $0x01000000 + JB four_bytes_standalone + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP memmove_long_standalone + +four_bytes_standalone: + MOVL SI, DI + SHRL $0x10, DI + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB DI, 3(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP memmove_long_standalone + +three_bytes_standalone: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP memmove_long_standalone + +two_bytes_standalone: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_standalone + JMP memmove_long_standalone + +one_byte_standalone: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, BX + ADDQ $0x01, AX + +memmove_standalone: + // genMemMoveShort + CMPQ DX, $0x03 + JB emit_lit_memmove_standalone_memmove_move_1or2 + JE emit_lit_memmove_standalone_memmove_move_3 + CMPQ DX, $0x08 + JB emit_lit_memmove_standalone_memmove_move_4through7 + CMPQ DX, $0x10 + JBE emit_lit_memmove_standalone_memmove_move_8through16 + CMPQ DX, $0x20 + JBE emit_lit_memmove_standalone_memmove_move_17through32 + JMP emit_lit_memmove_standalone_memmove_move_33through64 + +emit_lit_memmove_standalone_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(DX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(DX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(DX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(DX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +memmove_long_standalone: + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVQ DX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_standalonelarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_standalonelarge_big_loop_back + +emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ DX, R8 + JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +emit_literal_end_standalone_skip: + XORQ BX, BX + +emit_literal_end_standalone: + MOVQ BX, ret+48(FP) + RET + +// func emitRepeat(dst []byte, offset int, length int) int +TEXT ·emitRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitRepeat +emit_repeat_again_standalone: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone + +cant_repeat_two_offset_standalone: + CMPL DX, $0x00000104 + JB repeat_three_standalone + CMPL DX, $0x00010100 + JB repeat_four_standalone + CMPL DX, $0x0100ffff + JB repeat_five_standalone + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone + +repeat_five_standalone: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_repeat_end + +repeat_four_standalone: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_repeat_end + +repeat_three_standalone: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_repeat_end + +repeat_two_standalone: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_repeat_end + +repeat_two_offset_standalone: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + +gen_emit_repeat_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopy(dst []byte, offset int, length int) int +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone + CMPL DX, $0x40 + JBE four_bytes_remain_standalone + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone + + // emitRepeat +emit_repeat_again_standalone_emit_copy: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy + +cant_repeat_two_offset_standalone_emit_copy: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy + +repeat_five_standalone_emit_copy: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +four_bytes_remain_standalone: + TESTL DX, DX + JZ gen_emit_copy_end + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +two_byte_offset_standalone: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone + CMPL CX, $0x00000800 + JAE long_offset_short_standalone + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB CL, 1(AX) + MOVL CX, DI + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + SUBL $0x08, DX + + // emitRepeat + LEAL -4(DX), DX + JMP cant_repeat_two_offset_standalone_emit_copy_short_2b + +emit_repeat_again_standalone_emit_copy_short_2b: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short_2b + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short_2b + +cant_repeat_two_offset_standalone_emit_copy_short_2b: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short_2b + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy_short_2b + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short_2b + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short_2b + +repeat_five_standalone_emit_copy_short_2b: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short_2b: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short_2b: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short_2b: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +long_offset_short_standalone: + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + + // emitRepeat +emit_repeat_again_standalone_emit_copy_short: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short + +cant_repeat_two_offset_standalone_emit_copy_short: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy_short + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short + +repeat_five_standalone_emit_copy_short: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +two_byte_offset_short_standalone: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +emit_copy_three_standalone: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopyNoRepeat(dst []byte, offset int, length int) int +TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone_snappy + +four_bytes_loop_back_standalone_snappy: + CMPL DX, $0x40 + JBE four_bytes_remain_standalone_snappy + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone_snappy + JMP four_bytes_loop_back_standalone_snappy + +four_bytes_remain_standalone_snappy: + TESTL DX, DX + JZ gen_emit_copy_end_snappy + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end_snappy + +two_byte_offset_standalone_snappy: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone_snappy + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + JMP two_byte_offset_standalone_snappy + +two_byte_offset_short_standalone_snappy: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone_snappy + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone_snappy + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end_snappy + +emit_copy_three_standalone_snappy: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end_snappy: + MOVQ BX, ret+40(FP) + RET + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + +matchlen_loopback_16_standalone: + CMPL DX, $0x10 + JB matchlen_match8_standalone + MOVQ (AX)(SI*1), BX + MOVQ 8(AX)(SI*1), DI + XORQ (CX)(SI*1), BX + JNZ matchlen_bsf_8_standalone + XORQ 8(CX)(SI*1), DI + JNZ matchlen_bsf_16standalone + LEAL -16(DX), DX + LEAL 16(SI), SI + JMP matchlen_loopback_16_standalone + +matchlen_bsf_16standalone: +#ifdef GOAMD64_v3 + TZCNTQ DI, DI + +#else + BSFQ DI, DI + +#endif + SARQ $0x03, DI + LEAL 8(SI)(DI*1), SI + JMP gen_match_len_end + +matchlen_match8_standalone: + CMPL DX, $0x08 + JB matchlen_match4_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JNZ matchlen_bsf_8_standalone + LEAL -8(DX), DX + LEAL 8(SI), SI + JMP matchlen_match4_standalone + +matchlen_bsf_8_standalone: +#ifdef GOAMD64_v3 + TZCNTQ BX, BX + +#else + BSFQ BX, BX + +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x01 + JE matchlen_match1_standalone + JB gen_match_len_end + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL 2(SI), SI + SUBL $0x02, DX + JZ gen_match_len_end + +matchlen_match1_standalone: + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + LEAL 1(SI), SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET + +// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + XORQ DI, DI + +lz4_s2_loop: + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ AX, CX + JAE lz4_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4_s2_ll_end + +lz4_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4_s2_ll_loop + +lz4_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x04, R10 + CMPQ R8, BX + JAE lz4_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4_s2 + CMPL R11, $0x00000100 + JB two_bytes_lz4_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_s2 + +four_bytes_lz4_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_s2 + +three_bytes_lz4_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_s2 + +two_bytes_lz4_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4_s2 + JMP memmove_long_lz4_s2 + +one_byte_lz4_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4_s2_memmove_move_33through64 + +emit_lit_memmove_lz4_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4_s2: + MOVQ R11, AX + JMP lz4_s2_lits_emit_done + +memmove_long_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4_s2large_big_loop_back + +emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4_s2_lits_emit_done: + MOVQ R8, DX + +lz4_s2_lits_done: + CMPQ DX, BX + JNE lz4_s2_match + CMPQ R10, $0x04 + JEQ lz4_s2_done + JMP lz4_s2_corrupt + +lz4_s2_match: + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4_s2_corrupt + CMPQ R9, SI + JA lz4_s2_corrupt + CMPQ R10, $0x13 + JNE lz4_s2_ml_done + +lz4_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ R8, $0xff + JEQ lz4_s2_ml_loop + +lz4_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +lz4_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +lz4_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + XORQ DI, DI + +lz4s_s2_loop: + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ AX, CX + JAE lz4s_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4s_s2_ll_end + +lz4s_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4s_s2_ll_loop + +lz4s_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x03, R10 + CMPQ R8, BX + JAE lz4s_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4s_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4s_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4s_s2 + CMPL R11, $0x00000100 + JB two_bytes_lz4s_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4s_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4s_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_s2 + +four_bytes_lz4s_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_s2 + +three_bytes_lz4s_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_s2 + +two_bytes_lz4s_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4s_s2 + JMP memmove_long_lz4s_s2 + +one_byte_lz4s_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64 + +emit_lit_memmove_lz4s_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4s_s2: + MOVQ R11, AX + JMP lz4s_s2_lits_emit_done + +memmove_long_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4s_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back + +emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4s_s2_lits_emit_done: + MOVQ R8, DX + +lz4s_s2_lits_done: + CMPQ DX, BX + JNE lz4s_s2_match + CMPQ R10, $0x03 + JEQ lz4s_s2_done + JMP lz4s_s2_corrupt + +lz4s_s2_match: + CMPQ R10, $0x03 + JEQ lz4s_s2_loop + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4s_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4s_s2_corrupt + CMPQ R9, SI + JA lz4s_s2_corrupt + CMPQ R10, $0x12 + JNE lz4s_s2_ml_done + +lz4s_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ R8, $0xff + JEQ lz4s_s2_ml_loop + +lz4s_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4s_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +lz4s_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +lz4s_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + +lz4_snappy_loop: + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ AX, CX + JAE lz4_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4_snappy_ll_end + +lz4_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4_snappy_ll_loop + +lz4_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x04, R9 + CMPQ DI, BX + JAE lz4_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JB one_byte_lz4_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_snappy + +four_bytes_lz4_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_snappy + +three_bytes_lz4_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_snappy + +two_bytes_lz4_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4_snappy + JMP memmove_long_lz4_snappy + +one_byte_lz4_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4_snappy: + MOVQ R10, AX + JMP lz4_snappy_lits_emit_done + +memmove_long_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4_snappy_lits_emit_done: + MOVQ DI, DX + +lz4_snappy_lits_done: + CMPQ DX, BX + JNE lz4_snappy_match + CMPQ R9, $0x04 + JEQ lz4_snappy_done + JMP lz4_snappy_corrupt + +lz4_snappy_match: + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4_snappy_corrupt + CMPQ R8, SI + JA lz4_snappy_corrupt + CMPQ R9, $0x13 + JNE lz4_snappy_ml_done + +lz4_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4_snappy_ml_loop + +lz4_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4_snappy_loop + +lz4_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + +lz4s_snappy_loop: + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ AX, CX + JAE lz4s_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4s_snappy_ll_end + +lz4s_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4s_snappy_ll_loop + +lz4s_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x03, R9 + CMPQ DI, BX + JAE lz4s_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4s_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4s_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JB one_byte_lz4s_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4s_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4s_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4s_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_snappy + +four_bytes_lz4s_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_snappy + +three_bytes_lz4s_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_snappy + +two_bytes_lz4s_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4s_snappy + JMP memmove_long_lz4s_snappy + +one_byte_lz4s_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4s_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4s_snappy: + MOVQ R10, AX + JMP lz4s_snappy_lits_emit_done + +memmove_long_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4s_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4s_snappy_lits_emit_done: + MOVQ DI, DX + +lz4s_snappy_lits_done: + CMPQ DX, BX + JNE lz4s_snappy_match + CMPQ R9, $0x03 + JEQ lz4s_snappy_done + JMP lz4s_snappy_corrupt + +lz4s_snappy_match: + CMPQ R9, $0x03 + JEQ lz4s_snappy_loop + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4s_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4s_snappy_corrupt + CMPQ R8, SI + JA lz4s_snappy_corrupt + CMPQ R9, $0x12 + JNE lz4s_snappy_ml_done + +lz4s_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4s_snappy_ml_loop + +lz4s_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4s_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4s_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4s_snappy_loop + +lz4s_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go new file mode 100644 index 000000000000..fb7db25315ae --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -0,0 +1,602 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "sort" +) + +const ( + S2IndexHeader = "s2idx\x00" + S2IndexTrailer = "\x00xdi2s" + maxIndexEntries = 1 << 16 + // If distance is less than this, we do not add the entry. + minIndexDist = 1 << 20 +) + +// Index represents an S2/Snappy index. +type Index struct { + TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 // Total Compressed size if known. Will be -1 if unknown. + info []struct { + compressedOffset int64 + uncompressedOffset int64 + } + estBlockUncomp int64 +} + +func (i *Index) reset(maxBlock int) { + i.estBlockUncomp = int64(maxBlock) + i.TotalCompressed = -1 + i.TotalUncompressed = -1 + if len(i.info) > 0 { + i.info = i.info[:0] + } +} + +// allocInfos will allocate an empty slice of infos. +func (i *Index) allocInfos(n int) { + if n > maxIndexEntries { + panic("n > maxIndexEntries") + } + i.info = make([]struct { + compressedOffset int64 + uncompressedOffset int64 + }, 0, n) +} + +// add an uncompressed and compressed pair. +// Entries must be sent in order. +func (i *Index) add(compressedOffset, uncompressedOffset int64) error { + if i == nil { + return nil + } + lastIdx := len(i.info) - 1 + if lastIdx >= 0 { + latest := i.info[lastIdx] + if latest.uncompressedOffset == uncompressedOffset { + // Uncompressed didn't change, don't add entry, + // but update start index. + latest.compressedOffset = compressedOffset + i.info[lastIdx] = latest + return nil + } + if latest.uncompressedOffset > uncompressedOffset { + return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) + } + if latest.compressedOffset > compressedOffset { + return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.compressedOffset, compressedOffset) + } + if latest.uncompressedOffset+minIndexDist > uncompressedOffset { + // Only add entry if distance is large enough. + return nil + } + } + i.info = append(i.info, struct { + compressedOffset int64 + uncompressedOffset int64 + }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset}) + return nil +} + +// Find the offset at or before the wanted (uncompressed) offset. +// If offset is 0 or positive it is the offset from the beginning of the file. +// If the uncompressed size is known, the offset must be within the file. +// If an offset outside the file is requested io.ErrUnexpectedEOF is returned. +// If the offset is negative, it is interpreted as the distance from the end of the file, +// where -1 represents the last byte. +// If offset from the end of the file is requested, but size is unknown, +// ErrUnsupported will be returned. +func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) { + if i.TotalUncompressed < 0 { + return 0, 0, ErrCorrupt + } + if offset < 0 { + offset = i.TotalUncompressed + offset + if offset < 0 { + return 0, 0, io.ErrUnexpectedEOF + } + } + if offset > i.TotalUncompressed { + return 0, 0, io.ErrUnexpectedEOF + } + if len(i.info) > 200 { + n := sort.Search(len(i.info), func(n int) bool { + return i.info[n].uncompressedOffset > offset + }) + if n == 0 { + n = 1 + } + return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil + } + for _, info := range i.info { + if info.uncompressedOffset > offset { + break + } + compressedOff = info.compressedOffset + uncompressedOff = info.uncompressedOffset + } + return compressedOff, uncompressedOff, nil +} + +// reduce to stay below maxIndexEntries +func (i *Index) reduce() { + if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist { + return + } + + // Algorithm, keep 1, remove removeN entries... + removeN := (len(i.info) + 1) / maxIndexEntries + src := i.info + j := 0 + + // Each block should be at least 1MB, but don't reduce below 1000 entries. + for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 { + removeN++ + } + for idx := 0; idx < len(src); idx++ { + i.info[j] = src[idx] + j++ + idx += removeN + } + i.info = i.info[:j] + // Update maxblock estimate. + i.estBlockUncomp += i.estBlockUncomp * int64(removeN) +} + +func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte { + i.reduce() + var tmp [binary.MaxVarintLen64]byte + + initSize := len(b) + // We make the start a skippable header+size. + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + // Total Uncompressed size + n := binary.PutVarint(tmp[:], uncompTotal) + b = append(b, tmp[:n]...) + // Total Compressed size + n = binary.PutVarint(tmp[:], compTotal) + b = append(b, tmp[:n]...) + // Put EstBlockUncomp size + n = binary.PutVarint(tmp[:], i.estBlockUncomp) + b = append(b, tmp[:n]...) + // Put length + n = binary.PutVarint(tmp[:], int64(len(i.info))) + b = append(b, tmp[:n]...) + + // Check if we should add uncompressed offsets + var hasUncompressed byte + for idx, info := range i.info { + if idx == 0 { + if info.uncompressedOffset != 0 { + hasUncompressed = 1 + break + } + continue + } + if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp { + hasUncompressed = 1 + break + } + } + b = append(b, hasUncompressed) + + // Add each entry + if hasUncompressed == 1 { + for idx, info := range i.info { + uOff := info.uncompressedOffset + if idx > 0 { + prev := i.info[idx-1] + uOff -= prev.uncompressedOffset + (i.estBlockUncomp) + } + n = binary.PutVarint(tmp[:], uOff) + b = append(b, tmp[:n]...) + } + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + for idx, info := range i.info { + cOff := info.compressedOffset + if idx > 0 { + prev := i.info[idx-1] + cOff -= prev.compressedOffset + cPredict + // Update compressed size prediction, with half the error. + cPredict += cOff / 2 + } + n = binary.PutVarint(tmp[:], cOff) + b = append(b, tmp[:n]...) + } + + // Add Total Size. + // Stored as fixed size for easier reading. + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + // Update size + chunkLen := len(b) - initSize - skippableFrameHeader + b[initSize+1] = uint8(chunkLen >> 0) + b[initSize+2] = uint8(chunkLen >> 8) + b[initSize+3] = uint8(chunkLen >> 16) + //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal) + return b +} + +// Load a binary index. +// A zero value Index can be used or a previous one can be reused. +func (i *Index) Load(b []byte) ([]byte, error) { + if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + if b[0] != ChunkTypeIndex { + return b, ErrCorrupt + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return b, io.ErrUnexpectedEOF + } + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return b, ErrUnsupported + } + b = b[len(S2IndexHeader):] + + // Total Uncompressed + if v, n := binary.Varint(b); n <= 0 || v < 0 { + return b, ErrCorrupt + } else { + i.TotalUncompressed = v + b = b[n:] + } + + // Total Compressed + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + i.TotalCompressed = v + b = b[n:] + } + + // Read EstBlockUncomp + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 { + return b, ErrCorrupt + } + i.estBlockUncomp = v + b = b[n:] + } + + var entries int + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 || v > maxIndexEntries { + return b, ErrCorrupt + } + entries = int(v) + b = b[n:] + } + if cap(i.info) < entries { + i.allocInfos(entries) + } + i.info = i.info[:entries] + + if len(b) < 1 { + return b, io.ErrUnexpectedEOF + } + hasUncompressed := b[0] + b = b[1:] + if hasUncompressed&1 != hasUncompressed { + return b, ErrCorrupt + } + + // Add each uncompressed entry + for idx := range i.info { + var uOff int64 + if hasUncompressed != 0 { + // Load delta + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + uOff = v + b = b[n:] + } + } + + if idx > 0 { + prev := i.info[idx-1].uncompressedOffset + uOff += prev + (i.estBlockUncomp) + if uOff <= prev { + return b, ErrCorrupt + } + } + if uOff < 0 { + return b, ErrCorrupt + } + i.info[idx].uncompressedOffset = uOff + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + // Add each compressed entry + for idx := range i.info { + var cOff int64 + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + cOff = v + b = b[n:] + } + + if idx > 0 { + // Update compressed size prediction, with half the error. + cPredictNew := cPredict + cOff/2 + + prev := i.info[idx-1].compressedOffset + cOff += prev + cPredict + if cOff <= prev { + return b, ErrCorrupt + } + cPredict = cPredictNew + } + if cOff < 0 { + return b, ErrCorrupt + } + i.info[idx].compressedOffset = cOff + } + if len(b) < 4+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + // Skip size... + b = b[4:] + + // Check trailer... + if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return b, ErrCorrupt + } + return b[len(S2IndexTrailer):], nil +} + +// LoadStream will load an index from the end of the supplied stream. +// ErrUnsupported will be returned if the signature cannot be found. +// ErrCorrupt will be returned if unexpected values are found. +// io.ErrUnexpectedEOF is returned if there are too few bytes. +// IO errors are returned as-is. +func (i *Index) LoadStream(rs io.ReadSeeker) error { + // Go to end. + _, err := rs.Seek(-10, io.SeekEnd) + if err != nil { + return err + } + var tmp [10]byte + _, err = io.ReadFull(rs, tmp[:]) + if err != nil { + return err + } + // Check trailer... + if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return ErrUnsupported + } + sz := binary.LittleEndian.Uint32(tmp[:4]) + if sz > maxChunkSize+skippableFrameHeader { + return ErrCorrupt + } + _, err = rs.Seek(-int64(sz), io.SeekEnd) + if err != nil { + return err + } + + // Read index. + buf := make([]byte, sz) + _, err = io.ReadFull(rs, buf) + if err != nil { + return err + } + _, err = i.Load(buf) + return err +} + +// IndexStream will return an index for a stream. +// The stream structure will be checked, but +// data within blocks is not verified. +// The returned index can either be appended to the end of the stream +// or stored separately. +func IndexStream(r io.Reader) ([]byte, error) { + var i Index + var buf [maxChunkSize]byte + var readHeader bool + for { + _, err := io.ReadFull(r, buf[:4]) + if err != nil { + if err == io.EOF { + return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil + } + return nil, err + } + // Start of this chunk. + startChunk := i.TotalCompressed + i.TotalCompressed += 4 + + chunkType := buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + return nil, ErrCorrupt + } + readHeader = true + } + chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16 + if chunkLen < checksumSize { + return nil, ErrCorrupt + } + + i.TotalCompressed += int64(chunkLen) + _, err = io.ReadFull(r, buf[:chunkLen]) + if err != nil { + return nil, io.ErrUnexpectedEOF + } + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + // Skip checksum. + dLen, err := DecodedLen(buf[checksumSize:]) + if err != nil { + return nil, err + } + if dLen > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(dLen) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(dLen) + continue + case chunkTypeUncompressedData: + n2 := chunkLen - checksumSize + if n2 > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(n2) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(n2) + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + return nil, ErrCorrupt + } + + if string(buf[:len(magicBody)]) != magicBody { + if string(buf[:len(magicBody)]) != magicBodySnappy { + return nil, ErrCorrupt + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + return nil, ErrUnsupported + } + if chunkLen > maxChunkSize { + return nil, ErrUnsupported + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + } +} + +// JSON returns the index as JSON text. +func (i *Index) JSON() []byte { + type offset struct { + CompressedOffset int64 `json:"compressed"` + UncompressedOffset int64 `json:"uncompressed"` + } + x := struct { + TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. + Offsets []offset `json:"offsets"` + EstBlockUncomp int64 `json:"est_block_uncompressed"` + }{ + TotalUncompressed: i.TotalUncompressed, + TotalCompressed: i.TotalCompressed, + EstBlockUncomp: i.estBlockUncomp, + } + for _, v := range i.info { + x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset}) + } + b, _ := json.MarshalIndent(x, "", " ") + return b +} + +// RemoveIndexHeaders will trim all headers and trailers from a given index. +// This is expected to save 20 bytes. +// These can be restored using RestoreIndexHeaders. +// This removes a layer of security, but is the most compact representation. +// Returns nil if headers contains errors. +// The returned slice references the provided slice. +func RemoveIndexHeaders(b []byte) []byte { + const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4 + if len(b) <= save { + return nil + } + if b[0] != ChunkTypeIndex { + return nil + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return nil + } + b = b[:chunkLen] + + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return nil + } + b = b[len(S2IndexHeader):] + if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) { + return nil + } + b = bytes.TrimSuffix(b, []byte(S2IndexTrailer)) + + if len(b) < 4 { + return nil + } + return b[:len(b)-4] +} + +// RestoreIndexHeaders will index restore headers removed by RemoveIndexHeaders. +// No error checking is performed on the input. +// If a 0 length slice is sent, it is returned without modification. +func RestoreIndexHeaders(in []byte) []byte { + if len(in) == 0 { + return in + } + b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4) + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + b = append(b, in...) + + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + chunkLen := len(b) - skippableFrameHeader + b[1] = uint8(chunkLen >> 0) + b[2] = uint8(chunkLen >> 8) + b[3] = uint8(chunkLen >> 16) + return b +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go new file mode 100644 index 000000000000..46ed908e3c00 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go @@ -0,0 +1,585 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// LZ4Converter provides conversion from LZ4 blocks as defined here: +// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md +type LZ4Converter struct { +} + +// ErrDstTooSmall is returned when provided destination is too small. +var ErrDstTooSmall = errors.New("s2: destination too small") + +// ConvertBlock will convert an LZ4 block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4 block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat16(dst []byte, offset uint16, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat16(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint16 +// 4 <= length && length <= math.MaxUint32 +func emitCopy16(dst []byte, offset uint16, length int) int { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat16(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralGo(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go new file mode 100644 index 000000000000..000f39719c5c --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go @@ -0,0 +1,467 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "fmt" +) + +// LZ4sConverter provides conversion from LZ4s. +// (Intel modified LZ4 Blocks) +// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf +// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format. +// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData. +// The LZ4s block returned by the Intel® QAT hardware can be used by an external +// software post-processing to generate other compressed data formats. +// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses +// the same high-level formatting as LZ4 block format with the following encoding changes: +// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte. +// ONLY "Min match of 4 bytes" is supported. +type LZ4sConverter struct { +} + +// ConvertBlock will convert an LZ4s block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. + continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4s block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. + continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go new file mode 100644 index 000000000000..4d01c4190cce --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/reader.go @@ -0,0 +1,1075 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "runtime" + "sync" +) + +// ErrCantSeek is returned if the stream cannot be seeked. +type ErrCantSeek struct { + Reason string +} + +// Error returns the error as string. +func (e ErrCantSeek) Error() string { + return fmt.Sprintf("s2: Can't seek because %s", e.Reason) +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes. +func NewReader(r io.Reader, opts ...ReaderOption) *Reader { + nr := Reader{ + r: r, + maxBlock: maxBlockSize, + } + for _, opt := range opts { + if err := opt(&nr); err != nil { + nr.err = err + return &nr + } + } + nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize + if nr.lazyBuf > 0 { + nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize) + } else { + nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize) + } + nr.readHeader = nr.ignoreStreamID + nr.paramsOK = true + return &nr +} + +// ReaderOption is an option for creating a decoder. +type ReaderOption func(*Reader) error + +// ReaderMaxBlockSize allows to control allocations if the stream +// has been compressed with a smaller WriterBlockSize, or with the default 1MB. +// Blocks must be this size or smaller to decompress, +// otherwise the decoder will return ErrUnsupported. +// +// For streams compressed with Snappy this can safely be set to 64KB (64 << 10). +// +// Default is the maximum limit of 4MB. +func ReaderMaxBlockSize(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize <= 0 { + return errors.New("s2: block size too large. Must be <= 4MB and > 0") + } + if r.lazyBuf == 0 && blockSize < defaultBlockSize { + r.lazyBuf = blockSize + } + r.maxBlock = blockSize + return nil + } +} + +// ReaderAllocBlock allows to control upfront stream allocations +// and not allocate for frames bigger than this initially. +// If frames bigger than this is seen a bigger buffer will be allocated. +// +// Default is 1MB, which is default output size. +func ReaderAllocBlock(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize < 1024 { + return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024") + } + r.lazyBuf = blockSize + return nil + } +} + +// ReaderIgnoreStreamIdentifier will make the reader skip the expected +// stream identifier at the beginning of the stream. +// This can be used when serving a stream that has been forwarded to a specific point. +func ReaderIgnoreStreamIdentifier() ReaderOption { + return func(r *Reader) error { + r.ignoreStreamID = true + return nil + } +} + +// ReaderSkippableCB will register a callback for chuncks with the specified ID. +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). +// For each chunk with the ID, the callback is called with the content. +// Any returned non-nil error will abort decompression. +// Only one callback per ID is supported, latest sent will be used. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. +func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { + return func(r *Reader) error { + if id < 0x80 || id > 0xfd { + return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") + } + r.skippableCB[id-0x80] = fn + return nil + } +} + +// ReaderIgnoreCRC will make the reader skip CRC calculation and checks. +func ReaderIgnoreCRC() ReaderOption { + return func(r *Reader) error { + r.ignoreCRC = true + return nil + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + skippableCB [0xff - 0x80]func(r io.Reader) error + blockStart int64 // Uncompressed offset at start of current. + index *Index + + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + // maximum block size allowed. + maxBlock int + // maximum expected buffer size. + maxBufSize int + // alloc a buffer this size if > 0. + lazyBuf int + readHeader bool + paramsOK bool + snappyFrame bool + ignoreStreamID bool + ignoreCRC bool +} + +// GetBufferCapacity returns the capacity of the internal buffer. +// This might be useful to know when reusing the same reader in combination +// with the lazy buffer option. +func (r *Reader) GetBufferCapacity() int { + return cap(r.buf) +} + +// ensureBufferSize will ensure that the buffer can take at least n bytes. +// If false is returned the buffer exceeds maximum allowed size. +func (r *Reader) ensureBufferSize(n int) bool { + if n > r.maxBufSize { + r.err = ErrCorrupt + return false + } + if cap(r.buf) >= n { + return true + } + // Realloc buffer. + r.buf = make([]byte, n) + return true +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + if !r.paramsOK { + return + } + r.index = nil + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.blockStart = 0 + r.readHeader = r.ignoreStreamID +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// skippable will skip n bytes. +// If the supplied reader supports seeking that is used. +// tmp is used as a temporary buffer for reading. +// The supplied slice does not need to be the size of the read. +func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { + if id < 0x80 { + r.err = fmt.Errorf("internal error: skippable id < 0x80") + return false + } + if fn := r.skippableCB[id-0x80]; fn != nil { + rd := io.LimitReader(r.r, int64(n)) + r.err = fn(rd) + if r.err != nil { + return false + } + _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp) + return r.err == nil + } + if rs, ok := r.r.(io.ReadSeeker); ok { + _, err := rs.Seek(int64(n), io.SeekCurrent) + if err == nil { + return true + } + if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + return false + } + } + for n > 0 { + if n < len(tmp) { + tmp = tmp[:n] + } + if _, r.err = io.ReadFull(r.r, tmp); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + n -= len(tmp) + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if !r.ignoreCRC && crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if !r.ignoreCRC && crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return 0, r.err + } else { + r.snappyFrame = true + } + } else { + r.snappyFrame = false + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if chunkLen > maxChunkSize { + // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) + r.err = ErrUnsupported + return 0, r.err + } + + // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return 0, r.err + } + } +} + +// DecodeConcurrent will decode the full stream to w. +// This function should not be combined with reading, seeking or other operations. +// Up to 'concurrent' goroutines will be used. +// If <= 0, runtime.NumCPU will be used. +// On success the number of bytes decompressed nil and is returned. +// This is mainly intended for bigger streams. +func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) { + if r.i > 0 || r.j > 0 || r.blockStart > 0 { + return 0, errors.New("DecodeConcurrent called after ") + } + if concurrent <= 0 { + concurrent = runtime.NumCPU() + } + + // Write to output + var errMu sync.Mutex + var aErr error + setErr := func(e error) (ok bool) { + errMu.Lock() + defer errMu.Unlock() + if e == nil { + return aErr == nil + } + if aErr == nil { + aErr = e + } + return false + } + hasErr := func() (ok bool) { + errMu.Lock() + v := aErr != nil + errMu.Unlock() + return v + } + + var aWritten int64 + toRead := make(chan []byte, concurrent) + writtenBlocks := make(chan []byte, concurrent) + queue := make(chan chan []byte, concurrent) + reUse := make(chan chan []byte, concurrent) + for i := 0; i < concurrent; i++ { + toRead <- make([]byte, 0, r.maxBufSize) + writtenBlocks <- make([]byte, 0, r.maxBufSize) + reUse <- make(chan []byte, 1) + } + // Writer + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for toWrite := range queue { + entry := <-toWrite + reUse <- toWrite + if hasErr() || entry == nil { + if entry != nil { + writtenBlocks <- entry + } + continue + } + if hasErr() { + writtenBlocks <- entry + continue + } + n, err := w.Write(entry) + want := len(entry) + writtenBlocks <- entry + if err != nil { + setErr(err) + continue + } + if n != want { + setErr(io.ErrShortWrite) + continue + } + aWritten += int64(n) + } + }() + + defer func() { + if r.err != nil { + setErr(r.err) + } else if err != nil { + setErr(err) + } + close(queue) + wg.Wait() + if err == nil { + err = aErr + } + written = aWritten + }() + + // Reader + for !hasErr() { + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = nil + } + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + orgBuf := <-toRead + buf := orgBuf[:chunkLen] + + if !r.readFull(buf, false) { + return 0, r.err + } + + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + wg.Add(1) + + decoded := <-writtenBlocks + entry := <-reUse + queue <- entry + go func() { + defer wg.Done() + decoded = decoded[:n] + _, err := Decode(decoded, buf) + toRead <- orgBuf + if err != nil { + writtenBlocks <- decoded + setErr(err) + entry <- nil + return + } + if !r.ignoreCRC && crc(decoded) != checksum { + writtenBlocks <- decoded + setErr(ErrCRC) + entry <- nil + return + } + entry <- decoded + }() + continue + + case chunkTypeUncompressedData: + + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + // Grab write buffer + orgBuf := <-writtenBlocks + buf := orgBuf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read content. + n := chunkLen - checksumSize + + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + // Read uncompressed + buf = orgBuf[:n] + if !r.readFull(buf, false) { + return 0, r.err + } + + if !r.ignoreCRC && crc(buf) != checksum { + r.err = ErrCRC + return 0, r.err + } + entry := <-reUse + queue <- entry + entry <- buf + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return 0, r.err + } else { + r.snappyFrame = true + } + } else { + r.snappyFrame = false + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if chunkLen > maxChunkSize { + // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) + r.err = ErrUnsupported + return 0, r.err + } + + // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return 0, r.err + } + } + return 0, r.err +} + +// Skip will skip n bytes forward in the decompressed output. +// For larger skips this consumes less CPU and is faster than reading output and discarding it. +// CRC is not checked on skipped blocks. +// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped. +// If a decoding error is encountered subsequent calls to Read will also fail. +func (r *Reader) Skip(n int64) error { + if n < 0 { + return errors.New("attempted negative skip") + } + if r.err != nil { + return r.err + } + + for n > 0 { + if r.i < r.j { + // Skip in buffer. + // decoded[i:j] contains decoded bytes that have not yet been passed on. + left := int64(r.j - r.i) + if left >= n { + tmp := int64(r.i) + n + if tmp > math.MaxInt32 { + return errors.New("s2: internal overflow in skip") + } + r.i = int(tmp) + return nil + } + n -= int64(r.j - r.i) + r.i = r.j + } + + // Buffer empty; read blocks until we have content. + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = io.ErrUnexpectedEOF + } + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + dLen, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if dLen > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + // Check if destination is within this block + if int64(dLen) > n { + if len(r.decoded) < dLen { + r.decoded = make([]byte, dLen) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:dLen]) != checksum { + r.err = ErrCorrupt + return r.err + } + } else { + // Skip block completely + n -= int64(dLen) + r.blockStart += int64(dLen) + dLen = 0 + } + r.i, r.j = 0, dLen + continue + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err != nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n2 := chunkLen - checksumSize + if n2 > len(r.decoded) { + if n2 > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + r.decoded = make([]byte, n2) + } + if !r.readFull(r.decoded[:n2], false) { + return r.err + } + if int64(n2) < n { + if crc(r.decoded[:n2]) != checksum { + r.err = ErrCorrupt + return r.err + } + } + r.i, r.j = 0, n2 + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return r.err + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + if chunkLen > maxChunkSize { + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return r.err + } + } + return nil +} + +// ReadSeeker provides random or forward seeking in compressed content. +// See Reader.ReadSeeker +type ReadSeeker struct { + *Reader + readAtMu sync.Mutex +} + +// ReadSeeker will return an io.ReadSeeker and io.ReaderAt +// compatible version of the reader. +// If 'random' is specified the returned io.Seeker can be used for +// random seeking, otherwise only forward seeking is supported. +// Enabling random seeking requires the original input to support +// the io.Seeker interface. +// A custom index can be specified which will be used if supplied. +// When using a custom index, it will not be read from the input stream. +// The ReadAt position will affect regular reads and the current position of Seek. +// So using Read after ReadAt will continue from where the ReadAt stopped. +// No functions should be used concurrently. +// The returned ReadSeeker contains a shallow reference to the existing Reader, +// meaning changes performed to one is reflected in the other. +func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { + // Read index if provided. + if len(index) != 0 { + if r.index == nil { + r.index = &Index{} + } + if _, err := r.index.Load(index); err != nil { + return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()} + } + } + + // Check if input is seekable + rs, ok := r.r.(io.ReadSeeker) + if !ok { + if !random { + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream isn't seekable"} + } + + if r.index != nil { + // Seekable and index, ok... + return &ReadSeeker{Reader: r}, nil + } + + // Load from stream. + r.index = &Index{} + + // Read current position. + pos, err := rs.Seek(0, io.SeekCurrent) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + err = r.index.LoadStream(rs) + if err != nil { + if err == ErrUnsupported { + // If we don't require random seeking, reset input and return. + if !random { + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()} + } + r.index = nil + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream does not contain an index"} + } + return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()} + } + + // reset position. + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + return &ReadSeeker{Reader: r}, nil +} + +// Seek allows seeking in compressed data. +func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { + if r.err != nil { + if !errors.Is(r.err, io.EOF) { + return 0, r.err + } + // Reset on EOF + r.err = nil + } + + // Calculate absolute offset. + absOffset := offset + + switch whence { + case io.SeekStart: + case io.SeekCurrent: + absOffset = r.blockStart + int64(r.i) + offset + case io.SeekEnd: + if r.index == nil { + return 0, ErrUnsupported + } + absOffset = r.index.TotalUncompressed + offset + default: + r.err = ErrUnsupported + return 0, r.err + } + + if absOffset < 0 { + return 0, errors.New("seek before start of file") + } + + if !r.readHeader { + // Make sure we read the header. + _, r.err = r.Read([]byte{}) + if r.err != nil { + return 0, r.err + } + } + + // If we are inside current block no need to seek. + // This includes no offset changes. + if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) { + r.i = int(absOffset - r.blockStart) + return r.blockStart + int64(r.i), nil + } + + rs, ok := r.r.(io.ReadSeeker) + if r.index == nil || !ok { + currOffset := r.blockStart + int64(r.i) + if absOffset >= currOffset { + err := r.Skip(absOffset - currOffset) + return r.blockStart + int64(r.i), err + } + return 0, ErrUnsupported + } + + // We can seek and we have an index. + c, u, err := r.index.Find(absOffset) + if err != nil { + return r.blockStart + int64(r.i), err + } + + // Seek to next block + _, err = rs.Seek(c, io.SeekStart) + if err != nil { + return 0, err + } + + r.i = r.j // Remove rest of current block. + r.blockStart = u - int64(r.j) // Adjust current block start for accounting. + if u < absOffset { + // Forward inside block + return absOffset, r.Skip(absOffset - u) + } + if u > absOffset { + return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset) + } + return absOffset, nil +} + +// ReadAt reads len(p) bytes into p starting at offset off in the +// underlying input source. It returns the number of bytes +// read (0 <= n <= len(p)) and any error encountered. +// +// When ReadAt returns n < len(p), it returns a non-nil error +// explaining why more bytes were not returned. In this respect, +// ReadAt is stricter than Read. +// +// Even if ReadAt returns n < len(p), it may use all of p as scratch +// space during the call. If some data is available but not len(p) bytes, +// ReadAt blocks until either all the data is available or an error occurs. +// In this respect ReadAt is different from Read. +// +// If the n = len(p) bytes returned by ReadAt are at the end of the +// input source, ReadAt may return either err == EOF or err == nil. +// +// If ReadAt is reading from an input source with a seek offset, +// ReadAt should not affect nor be affected by the underlying +// seek offset. +// +// Clients of ReadAt can execute parallel ReadAt calls on the +// same input source. This is however not recommended. +func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) { + r.readAtMu.Lock() + defer r.readAtMu.Unlock() + _, err := r.Seek(offset, io.SeekStart) + if err != nil { + return 0, err + } + n := 0 + for n < len(p) { + n2, err := r.Read(p[n:]) + if err != nil { + // This will include io.EOF + return n + n2, err + } + n += n2 + } + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + if r.i < r.j { + c := r.decoded[r.i] + r.i++ + return c, nil + } + var tmp [1]byte + for range 10 { + n, err := r.Read(tmp[:]) + if err != nil { + return 0, err + } + if n == 1 { + return tmp[0], nil + } + } + return 0, io.ErrNoProgress +} + +// SkippableCB will register a callback for chunks with the specified ID. +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). +// For each chunk with the ID, the callback is called with the content. +// Any returned non-nil error will abort decompression. +// Only one callback per ID is supported, latest sent will be used. +// Sending a nil function will disable previous callbacks. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. +func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { + if id < 0x80 || id >= chunkTypePadding { + return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)") + } + r.skippableCB[id-0x80] = fn + return nil +} diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go new file mode 100644 index 000000000000..cbd1ed64d698 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/s2.go @@ -0,0 +1,151 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package s2 implements the S2 compression format. +// +// S2 is an extension of Snappy. Similar to Snappy S2 is aimed for high throughput, +// which is why it features concurrent compression for bigger payloads. +// +// Decoding is compatible with Snappy compressed content, +// but content compressed with S2 cannot be decompressed by Snappy. +// +// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2 +// +// There are actually two S2 formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a S2 stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// A "better" compression option is available. This will trade some compression +// speed +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// Blocks to not offer much data protection, so it is up to you to +// add data validation of decompressed blocks. +// +// Streams perform CRC validation of the decompressed data. +// Stream compression will also be performed on multiple CPU cores concurrently +// significantly improving throughput. +package s2 + +import ( + "bytes" + "hash/crc32" + + "github.com/klauspost/compress/internal/race" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy + magicBodySnappy = "sNaPpY" + magicBody = "S2sTwO" + + // maxBlockSize is the maximum size of the input to encodeBlock. + // + // For the framing format (Writer type instead of Encode function), + // this is the maximum uncompressed size of a block. + maxBlockSize = 4 << 20 + + // minBlockSize is the minimum size of block setting when creating a writer. + minBlockSize = 4 << 10 + + skippableFrameHeader = 4 + maxChunkSize = 1<<24 - 1 // 16777215 + + // Default block size + defaultBlockSize = 1 << 20 + + // maxSnappyBlockSize is the maximum snappy block size. + maxSnappyBlockSize = 1 << 16 + + obufHeaderLen = checksumSize + chunkHeaderSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + ChunkTypeIndex = 0x99 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + crcTable = crc32.MakeTable(crc32.Castagnoli) + magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes. + magicChunkBytes = []byte(magicChunk) // Can be passed to functions where it escapes. +) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + race.ReadSlice(b) + + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// literalExtraSize returns the extra size of encoding n literals. +// n should be >= 0 and <= math.MaxUint32. +func literalExtraSize(n int64) int64 { + if n == 0 { + return 0 + } + switch { + case n < 60: + return 1 + case n < 1<<8: + return 2 + case n < 1<<16: + return 3 + case n < 1<<24: + return 4 + default: + return 5 + } +} + +type byter interface { + Bytes() []byte +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go new file mode 100644 index 000000000000..09f1cff3a965 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -0,0 +1,1064 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/internal/race" +) + +const ( + levelUncompressed = iota + 1 + levelFast + levelBetter + levelBest +) + +// NewWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// Users must call Close to guarantee all data has been forwarded to +// the underlying io.Writer and that resources are released. +// They may also call Flush zero or more times before calling Close. +func NewWriter(w io.Writer, opts ...WriterOption) *Writer { + w2 := Writer{ + blockSize: defaultBlockSize, + concurrency: runtime.GOMAXPROCS(0), + randSrc: rand.Reader, + level: levelFast, + } + for _, opt := range opts { + if err := opt(&w2); err != nil { + w2.errState = err + return &w2 + } + } + w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) + w2.paramsOK = true + w2.ibuf = make([]byte, 0, w2.blockSize) + w2.buffers.New = func() any { + return make([]byte, w2.obufLen) + } + w2.Reset(w) + return &w2 +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + errMu sync.Mutex + errState error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + ibuf []byte + + blockSize int + obufLen int + concurrency int + written int64 + uncompWritten int64 // Bytes sent to compression + output chan chan result + buffers sync.Pool + pad int + + writer io.Writer + randSrc io.Reader + writerWg sync.WaitGroup + index Index + customEnc func(dst, src []byte) int + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool + paramsOK bool + snappy bool + flushOnWrite bool + appendIndex bool + bufferCB func([]byte) + level uint8 +} + +type result struct { + b []byte + // return when writing + ret []byte + // Uncompressed start offset + startOffset int64 +} + +// err returns the previously set error. +// If no error has been set it is set to err if not nil. +func (w *Writer) err(err error) error { + w.errMu.Lock() + errSet := w.errState + if errSet == nil && err != nil { + w.errState = err + errSet = err + } + w.errMu.Unlock() + return errSet +} + +// Reset discards the writer's state and switches the Snappy writer to write to w. +// This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + if !w.paramsOK { + return + } + // Close previous writer, if any. + if w.output != nil { + close(w.output) + w.writerWg.Wait() + w.output = nil + } + w.errState = nil + w.ibuf = w.ibuf[:0] + w.wroteStreamHeader = false + w.written = 0 + w.writer = writer + w.uncompWritten = 0 + w.index.reset(w.blockSize) + + // If we didn't get a writer, stop here. + if writer == nil { + return + } + // If no concurrency requested, don't spin up writer goroutine. + if w.concurrency == 1 { + return + } + + toWrite := make(chan chan result, w.concurrency) + w.output = toWrite + w.writerWg.Add(1) + + // Start a writer goroutine that will write all output in order. + go func() { + defer w.writerWg.Done() + + // Get a queued write. + for write := range toWrite { + // Wait for the data to be available. + input := <-write + if input.ret != nil && w.bufferCB != nil { + w.bufferCB(input.ret) + input.ret = nil + } + in := input.b + if len(in) > 0 { + if w.err(nil) == nil { + // Don't expose data from previous buffers. + toWrite := in[:len(in):len(in)] + // Write to output. + n, err := writer.Write(toWrite) + if err == nil && n != len(toWrite) { + err = io.ErrShortBuffer + } + _ = w.err(err) + w.err(w.index.add(w.written, input.startOffset)) + w.written += int64(n) + } + } + if cap(in) >= w.obufLen { + w.buffers.Put(in) + } + // close the incoming write request. + // This can be used for synchronizing flushes. + close(write) + } + }() +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.flushOnWrite { + return w.write(p) + } + // If we exceed the input buffer size, start writing + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + } + nRet += n + p = p[n:] + } + if err := w.err(nil); err != nil { + return nRet, err + } + // p should always be able to fit into w.ibuf now. + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +// ReadFrom implements the io.ReaderFrom interface. +// Using this is typically more efficient since it avoids a memory copy. +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { + if err := w.err(nil); err != nil { + return 0, err + } + if len(w.ibuf) > 0 { + err := w.AsyncFlush() + if err != nil { + return 0, err + } + } + if br, ok := r.(byter); ok { + buf := br.Bytes() + if err := w.EncodeBuffer(buf); err != nil { + return 0, err + } + return int64(len(buf)), w.AsyncFlush() + } + for { + inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen] + n2, err := io.ReadFull(r, inbuf[obufHeaderLen:]) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + if err != io.EOF { + return n, w.err(err) + } + } + if n2 == 0 { + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } + break + } + n += int64(n2) + err2 := w.writeFull(inbuf[:n2+obufHeaderLen]) + if w.err(err2) != nil { + break + } + + if err != nil { + // We got EOF and wrote everything + break + } + } + + return n, w.err(nil) +} + +// AddSkippableBlock will add a skippable block to the stream. +// The ID must be 0x80-0xfe (inclusive). +// Length of the skippable block must be <= 16777215 bytes. +func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { + if err := w.err(nil); err != nil { + return err + } + if len(data) == 0 { + return nil + } + if id < 0x80 || id > chunkTypePadding { + return fmt.Errorf("invalid skippable block id %x", id) + } + if len(data) > maxChunkSize { + return fmt.Errorf("skippable block excessed maximum size") + } + var header [4]byte + chunkLen := len(data) + header[0] = id + header[1] = uint8(chunkLen >> 0) + header[2] = uint8(chunkLen >> 8) + header[3] = uint8(chunkLen >> 16) + if w.concurrency == 1 { + write := func(b []byte) error { + n, err := w.writer.Write(b) + if err = w.err(err); err != nil { + return err + } + if n != len(b) { + return w.err(io.ErrShortWrite) + } + w.written += int64(n) + return w.err(nil) + } + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + if w.snappy { + if err := write([]byte(magicChunkSnappy)); err != nil { + return err + } + } else { + if err := write([]byte(magicChunk)); err != nil { + return err + } + } + } + if err := write(header[:]); err != nil { + return err + } + return write(data) + } + + // Create output... + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + // Copy input. + inbuf := w.buffers.Get().([]byte)[:4] + copy(inbuf, header[:]) + inbuf = append(inbuf, data...) + + output := make(chan result, 1) + // Queue output. + w.output <- output + output <- result{startOffset: w.uncompWritten, b: inbuf} + + return nil +} + +// EncodeBuffer will add a buffer to the stream. +// This is the fastest way to encode a stream, +// but the input buffer cannot be written to by the caller +// until Flush or Close has been called when concurrency != 1. +// +// Use the WriterBufferDone to receive a callback when the buffer is done +// Processing. +// +// Note that input is not buffered. +// This means that each write will result in discrete blocks being created. +// For buffered writes, use the regular Write function. +func (w *Writer) EncodeBuffer(buf []byte) (err error) { + if err := w.err(nil); err != nil { + return err + } + + if w.flushOnWrite { + _, err := w.write(buf) + return err + } + // Flush queued data first. + if len(w.ibuf) > 0 { + err := w.AsyncFlush() + if err != nil { + return err + } + } + if w.concurrency == 1 { + _, err := w.writeSync(buf) + if w.bufferCB != nil { + w.bufferCB(buf) + } + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + orgBuf := buf + for len(buf) > 0 { + // Cut input. + uncompressed := buf + if len(uncompressed) > w.blockSize { + uncompressed = uncompressed[:w.blockSize] + } + buf = buf[len(uncompressed):] + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + race.WriteSlice(obuf) + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + if len(buf) == 0 && w.bufferCB != nil { + res.ret = orgBuf + } + go func() { + race.ReadSlice(uncompressed) + + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // copy uncompressed + copy(obuf[obufHeaderLen:], uncompressed) + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + }() + } + return nil +} + +func (w *Writer) encodeBlock(obuf, uncompressed []byte) int { + if w.customEnc != nil { + if ret := w.customEnc(obuf, uncompressed); ret >= 0 { + return ret + } + } + if w.snappy { + switch w.level { + case levelFast: + return encodeBlockSnappy(obuf, uncompressed) + case levelBetter: + return encodeBlockBetterSnappy(obuf, uncompressed) + case levelBest: + return encodeBlockBestSnappy(obuf, uncompressed) + } + return 0 + } + switch w.level { + case levelFast: + return encodeBlock(obuf, uncompressed) + case levelBetter: + return encodeBlockBetter(obuf, uncompressed) + case levelBest: + return encodeBlockBest(obuf, uncompressed, nil) + } + return 0 +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.concurrency == 1 { + return w.writeSync(p) + } + + // Spawn goroutine and write block to output channel. + for len(p) > 0 { + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + // Copy input. + // If the block is incompressible, this is used for the result. + inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + obuf := w.buffers.Get().([]byte)[:w.obufLen] + copy(inbuf[obufHeaderLen:], uncompressed) + uncompressed = inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + nRet += len(uncompressed) + } + return nRet, nil +} + +// writeFull is a special version of write that will always write the full buffer. +// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer. +// The data will be written as a single block. +// The caller is not allowed to use inbuf after this function has been called. +func (w *Writer) writeFull(inbuf []byte) (errRet error) { + if err := w.err(nil); err != nil { + return err + } + + if w.concurrency == 1 { + _, err := w.writeSync(inbuf[obufHeaderLen:]) + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:w.obufLen] + uncompressed := inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + return nil +} + +func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + var n int + var err error + if w.snappy { + n, err = w.writer.Write(magicChunkSnappyBytes) + } else { + n, err = w.writer.Write(magicChunkBytes) + } + if err != nil { + return 0, w.err(err) + } + if n != len(magicChunk) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + + for len(p) > 0 { + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + obuf := w.buffers.Get().([]byte)[:w.obufLen] + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + obuf = obuf[:8] + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + n, err := w.writer.Write(obuf) + if err != nil { + return 0, w.err(err) + } + if n != len(obuf) { + return 0, w.err(io.ErrShortWrite) + } + w.err(w.index.add(w.written, w.uncompWritten)) + w.written += int64(n) + w.uncompWritten += int64(len(uncompressed)) + + if chunkType == chunkTypeUncompressedData { + // Write uncompressed data. + n, err := w.writer.Write(uncompressed) + if err != nil { + return 0, w.err(err) + } + if n != len(uncompressed) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + w.buffers.Put(obuf) + // Queue final output. + nRet += len(uncompressed) + } + return nRet, nil +} + +// AsyncFlush writes any buffered bytes to a block and starts compressing it. +// It does not wait for the output has been written as Flush() does. +func (w *Writer) AsyncFlush() error { + if err := w.err(nil); err != nil { + return err + } + + // Queue any data still in input buffer. + if len(w.ibuf) != 0 { + if !w.wroteStreamHeader { + _, err := w.writeSync(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err(err) + } else { + _, err := w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + err = w.err(err) + if err != nil { + return err + } + } + } + return w.err(nil) +} + +// Flush flushes the Writer to its underlying io.Writer. +// This does not apply padding. +func (w *Writer) Flush() error { + if err := w.AsyncFlush(); err != nil { + return err + } + if w.output == nil { + return w.err(nil) + } + + // Send empty buffer + res := make(chan result) + w.output <- res + // Block until this has been picked up. + res <- result{b: nil, startOffset: w.uncompWritten} + // When it is closed, we have flushed. + <-res + return w.err(nil) +} + +// Close calls Flush and then closes the Writer. +// Calling Close multiple times is ok, +// but calling CloseIndex after this will make it not return the index. +func (w *Writer) Close() error { + _, err := w.closeIndex(w.appendIndex) + return err +} + +// CloseIndex calls Close and returns an index on first call. +// This is not required if you are only adding index to a stream. +func (w *Writer) CloseIndex() ([]byte, error) { + return w.closeIndex(true) +} + +func (w *Writer) closeIndex(idx bool) ([]byte, error) { + err := w.Flush() + if w.output != nil { + close(w.output) + w.writerWg.Wait() + w.output = nil + } + + var index []byte + if w.err(err) == nil && w.writer != nil { + // Create index. + if idx { + compSize := int64(-1) + if w.pad <= 1 { + compSize = w.written + } + index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize) + // Count as written for padding. + if w.appendIndex { + w.written += int64(len(index)) + } + } + + if w.pad > 1 { + tmp := w.ibuf[:0] + if len(index) > 0 { + // Allocate another buffer. + tmp = w.buffers.Get().([]byte)[:0] + defer w.buffers.Put(tmp) + } + add := calcSkippableFrame(w.written, int64(w.pad)) + frame, err := skippableFrame(tmp, add, w.randSrc) + if err = w.err(err); err != nil { + return nil, err + } + n, err2 := w.writer.Write(frame) + if err2 == nil && n != len(frame) { + err2 = io.ErrShortWrite + } + _ = w.err(err2) + } + if len(index) > 0 && w.appendIndex { + n, err2 := w.writer.Write(index) + if err2 == nil && n != len(index) { + err2 = io.ErrShortWrite + } + _ = w.err(err2) + } + } + err = w.err(errClosed) + if err == errClosed { + return index, nil + } + return nil, err +} + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total) + } + if int64(total) >= maxBlockSize+skippableFrameHeader { + return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total) + } + // Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)" + dst = append(dst, chunkTypePadding) + f := uint32(total - skippableFrameHeader) + // Add chunk length. + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16)) + // Add data + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} + +var errClosed = errors.New("s2: Writer is closed") + +// WriterOption is an option for creating a encoder. +type WriterOption func(*Writer) error + +// WriterConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WriterConcurrency(n int) WriterOption { + return func(w *Writer) error { + if n <= 0 { + return errors.New("concurrency must be at least 1") + } + w.concurrency = n + return nil + } +} + +// WriterAddIndex will append an index to the end of a stream +// when it is closed. +func WriterAddIndex() WriterOption { + return func(w *Writer) error { + w.appendIndex = true + return nil + } +} + +// WriterBetterCompression will enable better compression. +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +func WriterBetterCompression() WriterOption { + return func(w *Writer) error { + w.level = levelBetter + return nil + } +} + +// WriterBestCompression will enable better compression. +// EncodeBest compresses better than Encode but typically with a +// big speed decrease on compression. +func WriterBestCompression() WriterOption { + return func(w *Writer) error { + w.level = levelBest + return nil + } +} + +// WriterUncompressed will bypass compression. +// The stream will be written as uncompressed blocks only. +// If concurrency is > 1 CRC and output will still be done async. +func WriterUncompressed() WriterOption { + return func(w *Writer) error { + w.level = levelUncompressed + return nil + } +} + +// WriterBufferDone will perform a callback when EncodeBuffer has finished +// writing a buffer to the output and the buffer can safely be reused. +// If the buffer was split into several blocks, it will be sent after the last block. +// Callbacks will not be done concurrently. +func WriterBufferDone(fn func(b []byte)) WriterOption { + return func(w *Writer) error { + w.bufferCB = fn + return nil + } +} + +// WriterBlockSize allows to override the default block size. +// Blocks will be this size or smaller. +// Minimum size is 4KB and maximum size is 4MB. +// +// Bigger blocks may give bigger throughput on systems with many cores, +// and will increase compression slightly, but it will limit the possible +// concurrency for smaller payloads for both encoding and decoding. +// Default block size is 1MB. +// +// When writing Snappy compatible output using WriterSnappyCompat, +// the maximum block size is 64KB. +func WriterBlockSize(n int) WriterOption { + return func(w *Writer) error { + if w.snappy && n > maxSnappyBlockSize || n < minBlockSize { + return errors.New("s2: block size too large. Must be <= 64K and >=4KB on for snappy compatible output") + } + if n > maxBlockSize || n < minBlockSize { + return errors.New("s2: block size too large. Must be <= 4MB and >=4KB") + } + w.blockSize = n + return nil + } +} + +// WriterPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 4MB. +// The padded area will be filled with data from crypto/rand.Reader. +// The padding will be applied whenever Close is called on the writer. +func WriterPadding(n int) WriterOption { + return func(w *Writer) error { + if n <= 0 { + return fmt.Errorf("s2: padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + w.pad = 0 + } + if n > maxBlockSize { + return fmt.Errorf("s2: padding must less than 4MB") + } + w.pad = n + return nil + } +} + +// WriterPaddingSrc will get random data for padding from the supplied source. +// By default crypto/rand is used. +func WriterPaddingSrc(reader io.Reader) WriterOption { + return func(w *Writer) error { + w.randSrc = reader + return nil + } +} + +// WriterSnappyCompat will write snappy compatible output. +// The output can be decompressed using either snappy or s2. +// If block size is more than 64KB it is set to that. +func WriterSnappyCompat() WriterOption { + return func(w *Writer) error { + w.snappy = true + if w.blockSize > 64<<10 { + // We choose 8 bytes less than 64K, since that will make literal emits slightly more effective. + // And allows us to skip some size checks. + w.blockSize = (64 << 10) - 8 + } + return nil + } +} + +// WriterFlushOnWrite will compress blocks on each call to the Write function. +// +// This is quite inefficient as blocks size will depend on the write size. +// +// Use WriterConcurrency(1) to also make sure that output is flushed. +// When Write calls return, otherwise they will be written when compression is done. +func WriterFlushOnWrite() WriterOption { + return func(w *Writer) error { + w.flushOnWrite = true + return nil + } +} + +// WriterCustomEncoder allows to override the encoder for blocks on the stream. +// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer. +// Block size (initial varint) should not be added by the encoder. +// Returning value 0 indicates the block could not be compressed. +// Returning a negative value indicates that compression should be attempted. +// The function should expect to be called concurrently. +func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption { + return func(w *Writer) error { + w.customEnc = fn + return nil + } +} diff --git a/vendor/github.com/klauspost/cpuid/v2/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml new file mode 100644 index 000000000000..1b695b62c351 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml @@ -0,0 +1,57 @@ +version: 2 + +builds: + - + id: "cpuid" + binary: cpuid + main: ./cmd/cpuid/main.go + env: + - CGO_ENABLED=0 + flags: + - -ldflags=-s -w + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm64 + goarm: + - 7 + +archives: + - + id: cpuid + name_template: "cpuid-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - LICENSE +checksum: + name_template: 'checksums.txt' +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "cpuid_package_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/cpuid + maintainer: Klaus Post + description: CPUID Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt new file mode 100644 index 000000000000..2ef4714f7165 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt @@ -0,0 +1,35 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/klauspost/cpuid/v2/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE new file mode 100644 index 000000000000..5cec7ee949b1 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md new file mode 100644 index 000000000000..7b1d59921160 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -0,0 +1,508 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. + +CPU features are detected on startup, and kept for fast access through the life of the application. +Currently x86 / x64 (AMD64/i386) and ARM (ARM64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. + +Package home: https://github.com/klauspost/cpuid + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2) +[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml) + +## installing + +`go get -u github.com/klauspost/cpuid/v2` using modules. +Drop `v2` for others. + +Installing binary: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +Or download binaries from release page: https://github.com/klauspost/cpuid/releases + +### Homebrew + +For macOS/Linux users, you can install via [brew](https://brew.sh/) + +```sh +$ brew install cpuid +``` + +## example + +```Go +package main + +import ( + "fmt" + "strings" + + . "github.com/klauspost/cpuid/v2" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", CPU.BrandName) + fmt.Println("PhysicalCores:", CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", CPU.LogicalCores) + fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID) + fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ",")) + fmt.Println("Cacheline bytes:", CPU.CacheLine) + fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes") + fmt.Println("Frequency", CPU.Hz, "hz") + + // Test if we have these specific features: + if CPU.Supports(SSE, SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: AMD Ryzen 9 3950X 16-Core Processor +PhysicalCores: 16 +ThreadsPerCore: 2 +LogicalCores: 32 +Family 23 Model: 113 Vendor ID: AMD +Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3 +Cacheline bytes: 64 +L1 Data Cache: 32768 bytes +L1 Instruction Cache: 32768 bytes +L2 Cache: 524288 bytes +L3 Cache: 16777216 bytes +Frequency 0 hz +We have Streaming SIMD 2 Extensions +``` + +# usage + +The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features. +A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler. + +To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc. +This can be using with `cpuid.CPU.HasAll(f)` to quickly test if all features are supported. + +Note that for some cpu/os combinations some features will not be detected. +`amd64` has rather good support and should work reliably on all platforms. + +Note that hypervisors may not pass through all CPU features through to the guest OS, +so even if your host supports a feature it may not be visible on guests. + +## arm64 feature detection + +Not all operating systems provide ARM features directly +and there is no safe way to do so for the rest. + +Currently `arm64/linux` and `arm64/freebsd` should be quite reliable. +`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected. + +A `DetectARM()` can be used if you are able to control your deployment, +it will detect CPU features, but may crash if the OS doesn't intercept the calls. +A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below. + +Note that currently only features are detected on ARM, +no additional information is currently available. + +## flags + +It is possible to add flags that affects cpu detection. + +For this the `Flags()` command is provided. + +This must be called *before* `flag.Parse()` AND after the flags have been parsed `Detect()` must be called. + +This means that any detection used in `init()` functions will not contain these flags. + +Example: + +```Go +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/klauspost/cpuid/v2" +) + +func main() { + cpuid.Flags() + flag.Parse() + cpuid.Detect() + + // Test if we have these specific features: + if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +## commandline + +Download as binary from: https://github.com/klauspost/cpuid/releases + +Install from source: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +### Example + +``` +λ cpuid +Name: AMD Ryzen 9 3950X 16-Core Processor +Vendor String: AuthenticAMD +Vendor ID: AMD +PhysicalCores: 16 +Threads Per Core: 2 +Logical Cores: 32 +CPU Family 23 Model: 113 +Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE +Microarchitecture level: 3 +Cacheline bytes: 64 +L1 Instruction Cache: 32768 bytes +L1 Data Cache: 32768 bytes +L2 Cache: 524288 bytes +L3 Cache: 16777216 bytes + +``` +### JSON Output: + +``` +λ cpuid --json +{ + "BrandName": "AMD Ryzen 9 3950X 16-Core Processor", + "VendorID": 2, + "VendorString": "AuthenticAMD", + "PhysicalCores": 16, + "ThreadsPerCore": 2, + "LogicalCores": 32, + "Family": 23, + "Model": 113, + "CacheLine": 64, + "Hz": 0, + "BoostFreq": 0, + "Cache": { + "L1I": 32768, + "L1D": 32768, + "L2": 524288, + "L3": 16777216 + }, + "SGX": { + "Available": false, + "LaunchControl": false, + "SGX1Supported": false, + "SGX2Supported": false, + "MaxEnclaveSizeNot64": 0, + "MaxEnclaveSize64": 0, + "EPCSections": null + }, + "Features": [ + "ADX", + "AESNI", + "AVX", + "AVX2", + "BMI1", + "BMI2", + "CLMUL", + "CLZERO", + "CMOV", + "CMPXCHG8", + "CPBOOST", + "CX16", + "F16C", + "FMA3", + "FXSR", + "FXSROPT", + "HTT", + "HYPERVISOR", + "LAHF", + "LZCNT", + "MCAOVERFLOW", + "MMX", + "MMXEXT", + "MOVBE", + "NX", + "OSXSAVE", + "POPCNT", + "RDRAND", + "RDSEED", + "RDTSCP", + "SCE", + "SHA", + "SSE", + "SSE2", + "SSE3", + "SSE4", + "SSE42", + "SSE4A", + "SSSE3", + "SUCCOR", + "X87", + "XSAVE" + ], + "X64Level": 3 +} +``` + +### Check CPU microarch level + +``` +λ cpuid --check-level=3 +2022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor +2022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3. +Exit Code 0 + +λ cpuid --check-level=4 +2022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor +2022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3. +Exit Code 1 +``` + + +## Available flags + +### x86 & amd64 + +| Feature Flag | Description | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) | +| AESNI | Advanced Encryption Standard New Instructions | +| AMD3DNOW | AMD 3DNOW | +| AMD3DNOWEXT | AMD 3DNowExt | +| AMXBF16 | Tile computational operations on BFLOAT16 numbers | +| AMXINT8 | Tile computational operations on 8-bit integers | +| AMXFP16 | Tile computational operations on FP16 numbers | +| AMXFP8 | Tile computational operations on FP8 numbers | +| AMXCOMPLEX | Tile computational operations on complex numbers | +| AMXTILE | Tile architecture | +| AMXTF32 | Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile | +| AMXTRANSPOSE | Tile multiply where the first operand is transposed | +| APX_F | Intel APX | +| AVX | AVX functions | +| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported | +| AVX10_128 | If set indicates that AVX10 128-bit vector support is present | +| AVX10_256 | If set indicates that AVX10 256-bit vector support is present | +| AVX10_512 | If set indicates that AVX10 512-bit vector support is present | +| AVX2 | AVX2 functions | +| AVX512BF16 | AVX-512 BFLOAT16 Instructions | +| AVX512BITALG | AVX-512 Bit Algorithms | +| AVX512BW | AVX-512 Byte and Word Instructions | +| AVX512CD | AVX-512 Conflict Detection Instructions | +| AVX512DQ | AVX-512 Doubleword and Quadword Instructions | +| AVX512ER | AVX-512 Exponential and Reciprocal Instructions | +| AVX512F | AVX-512 Foundation | +| AVX512FP16 | AVX-512 FP16 Instructions | +| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions | +| AVX512PF | AVX-512 Prefetch Instructions | +| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions | +| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 | +| AVX512VL | AVX-512 Vector Length Extensions | +| AVX512VNNI | AVX-512 Vector Neural Network Instructions | +| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q | +| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword | +| AVXIFMA | AVX-IFMA instructions | +| AVXNECONVERT | AVX-NE-CONVERT instructions | +| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | +| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | +| AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| AVXVNNIINT16 | AVX-VNNI-INT16 instructions | +| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | +| BMI1 | Bit Manipulation Instruction Set 1 | +| BMI2 | Bit Manipulation Instruction Set 2 | +| CETIBT | Intel CET Indirect Branch Tracking | +| CETSS | Intel CET Shadow Stack | +| CLDEMOTE | Cache Line Demote | +| CLMUL | Carry-less Multiplication | +| CLZERO | CLZERO instruction supported | +| CMOV | i686 CMOV | +| CMPCCXADD | CMPCCXADD instructions | +| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB | +| CMPXCHG8 | CMPXCHG8 instruction | +| CPBOOST | Core Performance Boost | +| CPPC | AMD: Collaborative Processor Performance Control | +| CX16 | CMPXCHG16B Instruction | +| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ | +| ENQCMD | Enqueue Command | +| ERMS | Enhanced REP MOVSB/STOSB | +| F16C | Half-precision floating-point conversion | +| FLUSH_L1D | Flush L1D cache | +| FMA3 | Intel FMA 3. Does not imply AVX. | +| FMA4 | Bulldozer FMA4 functions | +| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide | +| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide | +| FSRM | Fast Short Rep Mov | +| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 | +| FXSROPT | FXSAVE/FXRSTOR optimizations | +| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. | +| HLE | Hardware Lock Elision | +| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR | +| HTT | Hyperthreading (enabled) | +| HWA | Hardware assert supported. Indicates support for MSRC001_10 | +| HYBRID_CPU | This part has CPUs of more than one type. | +| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors | +| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) | +| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR | +| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) | +| IBRS | AMD: Indirect Branch Restricted Speculation | +| IBRS_PREFERRED | AMD: IBRS is preferred over software solution | +| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection | +| IBS | Instruction Based Sampling (AMD) | +| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) | +| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) | +| IBSFFV | Instruction Based Sampling Feature (AMD) | +| IBSOPCNT | Instruction Based Sampling Feature (AMD) | +| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) | +| IBSOPSAM | Instruction Based Sampling Feature (AMD) | +| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) | +| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) | +| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported | +| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported | +| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse | +| IBS_PREVENTHOST | Disallowing IBS use by the host supported | +| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 | +| IDPRED_CTRL | IPRED_DIS | +| INT_WBINVD | WBINVD/WBNOINVD are interruptible. | +| INVLPGB | NVLPGB and TLBSYNC instruction supported | +| KEYLOCKER | Key locker | +| KEYLOCKERW | Key locker wide | +| LAHF | LAHF/SAHF in long mode | +| LAM | If set, CPU supports Linear Address Masking | +| LBRVIRT | LBR virtualization | +| LZCNT | LZCNT instruction | +| MCAOVERFLOW | MCA overflow recovery support. | +| MCDT_NO | Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. | +| MCOMMIT | MCOMMIT instruction supported | +| MD_CLEAR | VERW clears CPU buffers | +| MMX | standard MMX | +| MMXEXT | SSE integer functions or AMD MMX ext | +| MOVBE | MOVBE instruction (big-endian) | +| MOVDIR64B | Move 64 Bytes as Direct Store | +| MOVDIRI | Move Doubleword as Direct Store | +| MOVSB_ZL | Fast Zero-Length MOVSB | +| MPX | Intel MPX (Memory Protection Extensions) | +| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD | +| MSRIRC | Instruction Retired Counter MSR available | +| MSRLIST | Read/Write List of Model Specific Registers | +| MSR_PAGEFLUSH | Page Flush MSR available | +| NRIPS | Indicates support for NRIP save on VMEXIT | +| NX | NX (No-Execute) bit | +| OSXSAVE | XSAVE enabled by OS | +| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption | +| POPCNT | POPCNT instruction | +| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled | +| PREFETCHI | PREFETCHIT0/1 instructions | +| PSFD | Predictive Store Forward Disable | +| RDPRU | RDPRU instruction supported | +| RDRAND | RDRAND instruction is available | +| RDSEED | RDSEED instruction is available | +| RDTSCP | RDTSCP Instruction | +| RRSBA_CTRL | Restricted RSB Alternate | +| RTM | Restricted Transactional Memory | +| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. | +| SERIALIZE | Serialize Instruction Execution | +| SEV | AMD Secure Encrypted Virtualization supported | +| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host | +| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported | +| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests | +| SEV_ES | AMD SEV Encrypted State supported | +| SEV_RESTRICTED | AMD SEV Restricted Injection supported | +| SEV_SNP | AMD SEV Secure Nested Paging supported | +| SGX | Software Guard Extensions | +| SGXLC | Software Guard Extensions Launch Control | +| SHA | Intel SHA Extensions | +| SME | AMD Secure Memory Encryption supported | +| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced | +| SM3_X86 | SM3 instructions | +| SM4_X86 | SM4 instructions | +| SPEC_CTRL_SSBD | Speculative Store Bypass Disable | +| SRBDS_CTRL | SRBDS mitigation MSR available | +| SSE | SSE functions | +| SSE2 | P4 SSE functions | +| SSE3 | Prescott SSE3 functions | +| SSE4 | Penryn SSE4.1 functions | +| SSE42 | Nehalem SSE4.2 functions | +| SSE4A | AMD Barcelona microarchitecture SSE4a instructions | +| SSSE3 | Conroe SSSE3 functions | +| STIBP | Single Thread Indirect Branch Predictors | +| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On | +| STOSB_SHORT | Fast short STOSB | +| SUCCOR | Software uncorrectable error containment and recovery capability. | +| SVM | AMD Secure Virtual Machine | +| SVMDA | Indicates support for the SVM decode assists. | +| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control | +| SVML | AMD SVM lock. Indicates support for SVM-Lock. | +| SVMNP | AMD SVM nested paging | +| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter | +| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold | +| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. | +| SYSEE | SYSENTER and SYSEXIT instructions | +| TBM | AMD Trailing Bit Manipulation | +| TDX_GUEST | Intel Trust Domain Extensions Guest | +| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations | +| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. | +| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. | +| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 | +| TSXLDTRK | Intel TSX Suspend Load Address Tracking | +| VAES | Vector AES. AVX(512) versions requires additional checks. | +| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. | +| VMPL | AMD VM Permission Levels supported | +| VMSA_REGPROT | AMD VMSA Register Protection supported | +| VMX | Virtual Machine Extensions | +| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. | +| VTE | AMD Virtual Transparent Encryption supported | +| WAITPKG | TPAUSE, UMONITOR, UMWAIT | +| WBNOINVD | Write Back and Do Not Invalidate Cache | +| WRMSRNS | Non-Serializing Write to Model Specific Register | +| X87 | FPU | +| XGETBV1 | Supports XGETBV with ECX = 1 | +| XOP | Bulldozer XOP functions | +| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV | +| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. | +| XSAVEOPT | XSAVEOPT available | +| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS | + +# ARM features: + +| Feature Flag | Description | +|--------------|------------------------------------------------------------------| +| AESARM | AES instructions | +| ARMCPUID | Some CPU ID registers readable at user-level | +| ASIMD | Advanced SIMD | +| ASIMDDP | SIMD Dot Product | +| ASIMDHP | Advanced SIMD half-precision floating point | +| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) | +| ATOMICS | Large System Extensions (LSE) | +| CRC32 | CRC32/CRC32C instructions | +| DCPOP | Data cache clean to Point of Persistence (DC CVAP) | +| EVTSTRM | Generic timer | +| FCMA | Floatin point complex number addition and multiplication | +| FHM | FMLAL and FMLSL instructions | +| FP | Single-precision and double-precision floating point | +| FPHP | Half-precision floating point | +| GPA | Generic Pointer Authentication | +| JSCVT | Javascript-style double->int convert (FJCVTZS) | +| LRCPC | Weaker release consistency (LDAPR, etc) | +| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) | +| RNDR | Random Number instructions | +| TLB | Outer Shareable and TLB range maintenance instructions | +| TS | Flag manipulation instructions | +| SHA1 | SHA-1 instructions (SHA1C, etc) | +| SHA2 | SHA-2 instructions (SHA256H, etc) | +| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) | +| SHA512 | SHA512 instructions | +| SM3 | SM3 instructions | +| SM4 | SM4 instructions | +| SVE | Scalable Vector Extension | + +# license + +This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go new file mode 100644 index 000000000000..248439a9a576 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -0,0 +1,1577 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// Package cpuid provides information about the CPU running the current program. +// +// CPU features are detected on startup, and kept for fast access through the life of the application. +// Currently x86 / x64 (AMD64) as well as arm64 is supported. +// +// You can access the CPU information by accessing the shared CPU variable of the cpuid library. +// +// Package home: https://github.com/klauspost/cpuid +package cpuid + +import ( + "flag" + "fmt" + "math" + "math/bits" + "os" + "runtime" + "strings" +) + +// AMD refererence: https://www.amd.com/system/files/TechDocs/25481.pdf +// and Processor Programming Reference (PPR) + +// Vendor is a representation of a CPU vendor. +type Vendor int + +const ( + VendorUnknown Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM + Bhyve + Hygon + SiS + RDC + + Ampere + ARM + Broadcom + Cavium + DEC + Fujitsu + Infineon + Motorola + NVIDIA + AMCC + Qualcomm + Marvell + + QEMU + QNX + ACRN + SRE + Apple + + lastVendor +) + +//go:generate stringer -type=FeatureID,Vendor + +// FeatureID is the ID of a specific cpu feature. +type FeatureID int + +const ( + // Keep index -1 as unknown + UNKNOWN = -1 + + // x86 features + ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + AESNI // Advanced Encryption Standard New Instructions + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + AMXBF16 // Tile computational operations on BFLOAT16 numbers + AMXFP16 // Tile computational operations on FP16 numbers + AMXINT8 // Tile computational operations on 8-bit integers + AMXFP8 // Tile computational operations on FP8 numbers + AMXTILE // Tile architecture + AMXTF32 // Tile architecture + AMXCOMPLEX // Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile + AMXTRANSPOSE // Tile multiply where the first operand is transposed + APX_F // Intel APX + AVX // AVX functions + AVX10 // If set the Intel AVX10 Converged Vector ISA is supported + AVX10_128 // If set indicates that AVX10 128-bit vector support is present + AVX10_256 // If set indicates that AVX10 256-bit vector support is present + AVX10_512 // If set indicates that AVX10 512-bit vector support is present + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512FP16 // AVX-512 FP16 Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + AVXIFMA // AVX-IFMA instructions + AVXNECONVERT // AVX-NE-CONVERT instructions + AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one + AVXVNNI // AVX (VEX encoded) VNNI neural network instructions + AVXVNNIINT8 // AVX-VNNI-INT8 instructions + AVXVNNIINT16 // AVX-VNNI-INT16 instructions + BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + CETIBT // Intel CET Indirect Branch Tracking + CETSS // Intel CET Shadow Stack + CLDEMOTE // Cache Line Demote + CLMUL // Carry-less Multiplication + CLZERO // CLZERO instruction supported + CMOV // i686 CMOV + CMPCCXADD // CMPCCXADD instructions + CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB + CMPXCHG8 // CMPXCHG8 instruction + CPBOOST // Core Performance Boost + CPPC // AMD: Collaborative Processor Performance Control + CX16 // CMPXCHG16B Instruction + EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ + ENQCMD // Enqueue Command + ERMS // Enhanced REP MOVSB/STOSB + F16C // Half-precision floating-point conversion + FLUSH_L1D // Flush L1D cache + FMA3 // Intel FMA 3. Does not imply AVX. + FMA4 // Bulldozer FMA4 functions + FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide + FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide + FSRM // Fast Short Rep Mov + FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 + FXSROPT // FXSAVE/FXRSTOR optimizations + GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. + HLE // Hardware Lock Elision + HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR + HTT // Hyperthreading (enabled) + HWA // Hardware assert supported. Indicates support for MSRC001_10 + HYBRID_CPU // This part has CPUs of more than one type. + HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors + IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) + IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR + IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + IBPB_BRTYPE // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor + IBRS // AMD: Indirect Branch Restricted Speculation + IBRS_PREFERRED // AMD: IBRS is preferred over software solution + IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection + IBS // Instruction Based Sampling (AMD) + IBSBRNTRGT // Instruction Based Sampling Feature (AMD) + IBSFETCHSAM // Instruction Based Sampling Feature (AMD) + IBSFFV // Instruction Based Sampling Feature (AMD) + IBSOPCNT // Instruction Based Sampling Feature (AMD) + IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) + IBSOPSAM // Instruction Based Sampling Feature (AMD) + IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) + IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported + IBS_OPDATA4 // AMD: IBS op data 4 MSR supported + IBS_OPFUSE // AMD: Indicates support for IbsOpFuse + IBS_PREVENTHOST // Disallowing IBS use by the host supported + IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 + IDPRED_CTRL // IPRED_DIS + INT_WBINVD // WBINVD/WBNOINVD are interruptible. + INVLPGB // NVLPGB and TLBSYNC instruction supported + KEYLOCKER // Key locker + KEYLOCKERW // Key locker wide + LAHF // LAHF/SAHF in long mode + LAM // If set, CPU supports Linear Address Masking + LBRVIRT // LBR virtualization + LZCNT // LZCNT instruction + MCAOVERFLOW // MCA overflow recovery support. + MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. + MCOMMIT // MCOMMIT instruction supported + MD_CLEAR // VERW clears CPU buffers + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + MOVBE // MOVBE instruction (big-endian) + MOVDIR64B // Move 64 Bytes as Direct Store + MOVDIRI // Move Doubleword as Direct Store + MOVSB_ZL // Fast Zero-Length MOVSB + MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD + MPX // Intel MPX (Memory Protection Extensions) + MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers + MSR_PAGEFLUSH // Page Flush MSR available + NRIPS // Indicates support for NRIP save on VMEXIT + NX // NX (No-Execute) bit + OSXSAVE // XSAVE enabled by OS + PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption + POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // Predictive Store Forward Disable + RDPRU // RDPRU instruction supported + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate + RTM // Restricted Transactional Memory + RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. + SBPB // Indicates support for the Selective Branch Predictor Barrier + SERIALIZE // Serialize Instruction Execution + SEV // AMD Secure Encrypted Virtualization supported + SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host + SEV_ALTERNATIVE // AMD SEV Alternate Injection supported + SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests + SEV_ES // AMD SEV Encrypted State supported + SEV_RESTRICTED // AMD SEV Restricted Injection supported + SEV_SNP // AMD SEV Secure Nested Paging supported + SGX // Software Guard Extensions + SGXLC // Software Guard Extensions Launch Control + SHA // Intel SHA Extensions + SME // AMD Secure Memory Encryption supported + SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SM3_X86 // SM3 instructions + SM4_X86 // SM4 instructions + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available + SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO. + SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability + SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On + STOSB_SHORT // Fast short STOSB + SUCCOR // Software uncorrectable error containment and recovery capability. + SVM // AMD Secure Virtual Machine + SVMDA // Indicates support for the SVM decode assists. + SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control + SVML // AMD SVM lock. Indicates support for SVM-Lock. + SVMNP // AMD SVM nested paging + SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter + SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. + SYSEE // SYSENTER and SYSEXIT instructions + TBM // AMD Trailing Bit Manipulation + TDX_GUEST // Intel Trust Domain Extensions Guest + TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations + TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. + TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 + TSXLDTRK // Intel TSX Suspend Load Address Tracking + VAES // Vector AES. AVX(512) versions requires additional checks. + VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. + VMPL // AMD VM Permission Levels supported + VMSA_REGPROT // AMD VMSA Register Protection supported + VMX // Virtual Machine Extensions + VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. + VTE // AMD Virtual Transparent Encryption supported + WAITPKG // TPAUSE, UMONITOR, UMWAIT + WBNOINVD // Write Back and Do Not Invalidate Cache + WRMSRNS // Non-Serializing Write to Model Specific Register + X87 // FPU + XGETBV1 // Supports XGETBV with ECX = 1 + XOP // Bulldozer XOP functions + XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV + XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. + XSAVEOPT // XSAVEOPT available + XSAVES // Supports XSAVES/XRSTORS and IA32_XSS + + // ARM features: + AESARM // AES instructions + ARMCPUID // Some CPU ID registers readable at user-level + ASIMD // Advanced SIMD + ASIMDDP // SIMD Dot Product + ASIMDHP // Advanced SIMD half-precision floating point + ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) + ATOMICS // Large System Extensions (LSE) + CRC32 // CRC32/CRC32C instructions + DCPOP // Data cache clean to Point of Persistence (DC CVAP) + EVTSTRM // Generic timer + FCMA // Floating point complex number addition and multiplication + FHM // FMLAL and FMLSL instructions + FP // Single-precision and double-precision floating point + FPHP // Half-precision floating point + GPA // Generic Pointer Authentication + JSCVT // Javascript-style double->int convert (FJCVTZS) + LRCPC // Weaker release consistency (LDAPR, etc) + PMULL // Polynomial Multiply instructions (PMULL/PMULL2) + RNDR // Random Number instructions + TLB // Outer Shareable and TLB range maintenance instructions + TS // Flag manipulation instructions + SHA1 // SHA-1 instructions (SHA1C, etc) + SHA2 // SHA-2 instructions (SHA256H, etc) + SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) + SHA512 // SHA512 instructions + SM3 // SM3 instructions + SM4 // SM4 instructions + SVE // Scalable Vector Extension + // Keep it last. It automatically defines the size of []flagSet + lastID + + firstID FeatureID = UNKNOWN + 1 +) + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + VendorString string // Raw vendor string. + HypervisorVendorID Vendor // Hypervisor vendor + HypervisorVendorString string // Raw hypervisor vendor string + featureSet flagSet // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + Stepping int // CPU stepping info + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. + BoostFreq int64 // Max clock speed, if known, 0 otherwise + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected + } + SGX SGXSupport + AMDMemEncryption AMDMemEncryptionSupport + AVX10Level uint8 + + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) +var darwinHasAVX512 = func() bool { return false } + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data. +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. +func Detect() { + // Set defaults + CPU.ThreadsPerCore = 1 + CPU.Cache.L1I = -1 + CPU.Cache.L1D = -1 + CPU.Cache.L2 = -1 + CPU.Cache.L3 = -1 + safe := true + if detectArmFlag != nil { + safe = !*detectArmFlag + } + addInfo(&CPU, safe) + if displayFeats != nil && *displayFeats { + fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ",")) + // Exit with non-zero so tests will print value. + os.Exit(1) + } + if disableFlag != nil { + s := strings.Split(*disableFlag, ",") + for _, feat := range s { + feat := ParseFeature(strings.TrimSpace(feat)) + if feat != UNKNOWN { + CPU.featureSet.unset(feat) + } + } + } +} + +// DetectARM will detect ARM64 features. +// This is NOT done automatically since it can potentially crash +// if the OS does not handle the command. +// If in the future this can be done safely this function may not +// do anything. +func DetectARM() { + addInfo(&CPU, false) +} + +var detectArmFlag *bool +var displayFeats *bool +var disableFlag *string + +// Flags will enable flags. +// This must be called *before* flag.Parse AND +// Detect must be called after the flags have been parsed. +// Note that this means that any detection used in init() functions +// will not contain these flags. +func Flags() { + disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list") + displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits") + detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash") +} + +// Supports returns whether the CPU supports all of the requested features. +func (c CPUInfo) Supports(ids ...FeatureID) bool { + for _, id := range ids { + if !c.featureSet.inSet(id) { + return false + } + } + return true +} + +// Has allows for checking a single feature. +// Should be inlined by the compiler. +func (c *CPUInfo) Has(id FeatureID) bool { + return c.featureSet.inSet(id) +} + +// AnyOf returns whether the CPU supports one or more of the requested features. +func (c CPUInfo) AnyOf(ids ...FeatureID) bool { + for _, id := range ids { + if c.featureSet.inSet(id) { + return true + } + } + return false +} + +// Features contains several features combined for a fast check using +// CpuInfo.HasAll +type Features *flagSet + +// CombineFeatures allows to combine several features for a close to constant time lookup. +func CombineFeatures(ids ...FeatureID) Features { + var v flagSet + for _, id := range ids { + v.set(id) + } + return &v +} + +func (c *CPUInfo) HasAll(f Features) bool { + return c.featureSet.hasSetP(f) +} + +// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels +var oneOfLevel = CombineFeatures(SYSEE, SYSCALL) +var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2) +var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3) +var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE) +var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL) + +// X64Level returns the microarchitecture level detected on the CPU. +// If features are lacking or non x64 mode, 0 is returned. +// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels +func (c CPUInfo) X64Level() int { + if !c.featureSet.hasOneOf(oneOfLevel) { + return 0 + } + if c.featureSet.hasSetP(level4Features) { + return 4 + } + if c.featureSet.hasSetP(level3Features) { + return 3 + } + if c.featureSet.hasSetP(level2Features) { + return 2 + } + if c.featureSet.hasSetP(level1Features) { + return 1 + } + return 0 +} + +// Disable will disable one or several features. +func (c *CPUInfo) Disable(ids ...FeatureID) bool { + for _, id := range ids { + c.featureSet.unset(id) + } + return true +} + +// Enable will disable one or several features even if they were undetected. +// This is of course not recommended for obvious reasons. +func (c *CPUInfo) Enable(ids ...FeatureID) bool { + for _, id := range ids { + c.featureSet.set(id) + } + return true +} + +// IsVendor returns true if vendor is recognized as Intel +func (c CPUInfo) IsVendor(v Vendor) bool { + return c.VendorID == v +} + +// FeatureSet returns all available features as strings. +func (c CPUInfo) FeatureSet() []string { + s := make([]string, 0, c.featureSet.nEnabled()) + s = append(s, c.featureSet.Strings()...) + return s +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c CPUInfo) RTCounter() uint64 { + if !c.Has(RDTSCP) { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c CPUInfo) Ia32TscAux() uint32 { + if !c.Has(RDTSCP) { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// SveLengths returns arm SVE vector and predicate lengths in bits. +// Will return 0, 0 if SVE is not enabled or otherwise unable to detect. +func (c CPUInfo) SveLengths() (vl, pl uint64) { + if !c.Has(SVE) { + return 0, 0 + } + return getVectorLength() +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c CPUInfo) LogicalCPU() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// frequencies tries to compute the clock speed of the CPU. If leaf 15 is +// supported, use it, otherwise parse the brand string. Yes, really. +func (c *CPUInfo) frequencies() { + c.Hz, c.BoostFreq = 0, 0 + mfi := maxFunctionID() + if mfi >= 0x15 { + eax, ebx, ecx, _ := cpuid(0x15) + if eax != 0 && ebx != 0 && ecx != 0 { + c.Hz = (int64(ecx) * int64(ebx)) / int64(eax) + } + } + if mfi >= 0x16 { + a, b, _, _ := cpuid(0x16) + // Base... + if a&0xffff > 0 { + c.Hz = int64(a&0xffff) * 1_000_000 + } + // Boost... + if b&0xffff > 0 { + c.BoostFreq = int64(b&0xffff) * 1_000_000 + } + } + if c.Hz > 0 { + return + } + + // computeHz determines the official rated speed of a CPU from its brand + // string. This insanity is *actually the official documented way to do + // this according to Intel*, prior to leaf 0x15 existing. The official + // documentation only shows this working for exactly `x.xx` or `xxxx` + // cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other + // sizes. + model := c.BrandName + hz := strings.LastIndex(model, "Hz") + if hz < 3 { + return + } + var multiplier int64 + switch model[hz-1] { + case 'M': + multiplier = 1000 * 1000 + case 'G': + multiplier = 1000 * 1000 * 1000 + case 'T': + multiplier = 1000 * 1000 * 1000 * 1000 + } + if multiplier == 0 { + return + } + freq := int64(0) + divisor := int64(0) + decimalShift := int64(1) + var i int + for i = hz - 2; i >= 0 && model[i] != ' '; i-- { + if model[i] >= '0' && model[i] <= '9' { + freq += int64(model[i]-'0') * decimalShift + decimalShift *= 10 + } else if model[i] == '.' { + if divisor != 0 { + return + } + divisor = decimalShift + } else { + return + } + } + // we didn't find a space + if i < 0 { + return + } + if divisor != 0 { + c.Hz = (freq * multiplier) / divisor + return + } + c.Hz = freq * multiplier +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. +func (c CPUInfo) VM() bool { + return CPU.featureSet.inSet(HYPERVISOR) +} + +// flags contains detected cpu features and characteristics +type flags uint64 + +// log2(bits_in_uint64) +const flagBitsLog2 = 6 +const flagBits = 1 << flagBitsLog2 +const flagMask = flagBits - 1 + +// flagSet contains detected cpu features and characteristics in an array of flags +type flagSet [(lastID + flagMask) / flagBits]flags + +func (s *flagSet) inSet(feat FeatureID) bool { + return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0 +} + +func (s *flagSet) set(feat FeatureID) { + s[feat>>flagBitsLog2] |= 1 << (feat & flagMask) +} + +// setIf will set a feature if boolean is true. +func (s *flagSet) setIf(cond bool, features ...FeatureID) { + if cond { + for _, offset := range features { + s[offset>>flagBitsLog2] |= 1 << (offset & flagMask) + } + } +} + +func (s *flagSet) unset(offset FeatureID) { + bit := flags(1 << (offset & flagMask)) + s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit +} + +// or with another flagset. +func (s *flagSet) or(other flagSet) { + for i, v := range other[:] { + s[i] |= v + } +} + +// hasSet returns whether all features are present. +func (s *flagSet) hasSet(other flagSet) bool { + for i, v := range other[:] { + if s[i]&v != v { + return false + } + } + return true +} + +// hasSet returns whether all features are present. +func (s *flagSet) hasSetP(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != v { + return false + } + } + return true +} + +// hasOneOf returns whether one or more features are present. +func (s *flagSet) hasOneOf(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != 0 { + return true + } + } + return false +} + +// nEnabled will return the number of enabled flags. +func (s *flagSet) nEnabled() (n int) { + for _, v := range s[:] { + n += bits.OnesCount64(uint64(v)) + } + return n +} + +func flagSetWith(feat ...FeatureID) flagSet { + var res flagSet + for _, f := range feat { + res.set(f) + } + return res +} + +// ParseFeature will parse the string and return the ID of the matching feature. +// Will return UNKNOWN if not found. +func ParseFeature(s string) FeatureID { + s = strings.ToUpper(s) + for i := firstID; i < lastID; i++ { + if i.String() == s { + return i + } + } + return UNKNOWN +} + +// Strings returns an array of the detected features for FlagsSet. +func (s flagSet) Strings() []string { + if len(s) == 0 { + return []string{""} + } + r := make([]string, 0) + for i := firstID; i < lastID; i++ { + if s.inSet(i) { + r = append(r, i.String()) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + vend, _ := vendorID() + + if mfi < 0x4 || (vend != Intel && vend != AMD) { + return 1 + } + + if mfi < 0xb { + if vend != Intel { + return 1 + } + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + if vend == AMD { + // if >= Zen 2 0x8000001e EBX 15-8 bits means threads per core. + // The number of threads per core is ThreadsPerCore+1 + // See PPR for AMD Family 17h Models 00h-0Fh (page 82) + fam, _, _ := familyModel() + _, _, _, d := cpuid(1) + if (d&(1<<28)) != 0 && fam >= 23 { + if maxExtendedFunction() >= 0x8000001e { + _, b, _, _ := cpuid(0x8000001e) + return int((b>>8)&0xff) + 1 + } + return 2 + } + } + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + v, _ := vendorID() + switch v { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case AMD, Hygon: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (family, model, stepping int) { + if maxFunctionID() < 0x1 { + return 0, 0, 0 + } + eax, _, _, _ := cpuid(1) + // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0]. + family = int((eax >> 8) & 0xf) + extFam := family == 0x6 // Intel is 0x6, needs extended model. + if family == 0xf { + // Add ExtFamily + family += int((eax >> 20) & 0xff) + extFam = true + } + // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0]. + model = int((eax >> 4) & 0xf) + if extFam { + // Add ExtModel + model += int((eax >> 12) & 0xf0) + } + stepping = int(eax & 0xf) + return family, model, stepping +} + +func physicalCores() int { + v, _ := vendorID() + switch v { + case Intel: + lc := logicalCores() + tpc := threadsPerCore() + if lc > 0 && tpc > 0 { + return lc / tpc + } + return 0 + case AMD, Hygon: + lc := logicalCores() + tpc := threadsPerCore() + if lc > 0 && tpc > 0 { + return lc / tpc + } + + // The following is inaccurate on AMD EPYC 7742 64-Core Processor + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + if c&0xff > 0 { + return int(c&0xff) + 1 + } + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]Vendor{ + "AMDisbetter!": AMD, + "AuthenticAMD": AMD, + "CentaurHauls": VIA, + "GenuineIntel": Intel, + "TransmetaCPU": Transmeta, + "GenuineTMx86": Transmeta, + "Geode by NSC": NSC, + "VIA VIA VIA ": VIA, + "KVMKVMKVM": KVM, + "Linux KVM Hv": KVM, + "TCGTCGTCGTCG": QEMU, + "Microsoft Hv": MSVM, + "VMwareVMware": VMware, + "XenVMMXenVMM": XenHVM, + "bhyve bhyve ": Bhyve, + "HygonGenuine": Hygon, + "Vortex86 SoC": SiS, + "SiS SiS SiS ": SiS, + "RiseRiseRise": SiS, + "Genuine RDC": RDC, + "QNXQVMBSQG": QNX, + "ACRNACRNACRN": ACRN, + "SRESRESRESRE": SRE, + "Apple VZ": Apple, +} + +func vendorID() (Vendor, string) { + _, b, c, d := cpuid(0) + v := string(valAsString(b, d, c)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + +func hypervisorVendorID() (Vendor, string) { + // https://lwn.net/Articles/301888/ + _, b, c, d := cpuid(0x40000000) + v := string(valAsString(b, c, d)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *CPUInfo) cacheSize() { + c.Cache.L1D = -1 + c.Cache.L1I = -1 + c.Cache.L2 = -1 + c.Cache.L3 = -1 + vendor, _ := vendorID() + switch vendor { + case Intel: + if maxFunctionID() < 4 { + return + } + c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0 + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.Cache.L1D = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.Cache.L1I = size + } else { + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + case AMD, Hygon: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + + // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties + if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) { + return + } + + // Xen Hypervisor is buggy and returns the same entry no matter ECX value. + // Hack: When we encounter the same entry 100 times we break. + nSame := 0 + var last uint32 + for i := uint32(0); i < math.MaxUint32; i++ { + eax, ebx, ecx, _ := cpuidex(0x8000001D, i) + + level := (eax >> 5) & 7 + cacheNumSets := ecx + 1 + cacheLineSize := 1 + (ebx & 2047) + cachePhysPartitions := 1 + ((ebx >> 12) & 511) + cacheNumWays := 1 + ((ebx >> 22) & 511) + + typ := eax & 15 + size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays) + if typ == 0 { + return + } + + // Check for the same value repeated. + comb := eax ^ ebx ^ ecx + if comb == last { + nSame++ + if nSame == 100 { + return + } + } + last = comb + + switch level { + case 1: + switch typ { + case 1: + // Data cache + c.Cache.L1D = size + case 2: + // Inst cache + c.Cache.L1I = size + default: + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + } +} + +type SGXEPCSection struct { + BaseAddress uint64 + EPCSize uint64 +} + +type SGXSupport struct { + Available bool + LaunchControl bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 + EPCSections []SGXEPCSection +} + +func hasSGX(available, lc bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + rval.LaunchControl = lc + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + rval.EPCSections = make([]SGXEPCSection, 0) + + for subleaf := uint32(2); subleaf < 2+8; subleaf++ { + eax, ebx, ecx, edx := cpuidex(0x12, subleaf) + leafType := eax & 0xf + + if leafType == 0 { + // Invalid subleaf, stop iterating + break + } else if leafType == 1 { + // EPC Section subleaf + baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32) + size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32) + + section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size} + rval.EPCSections = append(rval.EPCSections, section) + } + } + + return +} + +type AMDMemEncryptionSupport struct { + Available bool + CBitPossition uint32 + NumVMPL uint32 + PhysAddrReduction uint32 + NumEntryptedGuests uint32 + MinSevNoEsAsid uint32 +} + +func hasAMDMemEncryption(available bool) (rval AMDMemEncryptionSupport) { + rval.Available = available + if !available { + return + } + + _, b, c, d := cpuidex(0x8000001f, 0) + + rval.CBitPossition = b & 0x3f + rval.PhysAddrReduction = (b >> 6) & 0x3F + rval.NumVMPL = (b >> 12) & 0xf + rval.NumEntryptedGuests = c + rval.MinSevNoEsAsid = d + + return +} + +func support() flagSet { + var fs flagSet + mfi := maxFunctionID() + vend, _ := vendorID() + if mfi < 0x1 { + return fs + } + family, model, _ := familyModel() + + _, _, c, d := cpuid(1) + fs.setIf((d&(1<<0)) != 0, X87) + fs.setIf((d&(1<<8)) != 0, CMPXCHG8) + fs.setIf((d&(1<<11)) != 0, SYSEE) + fs.setIf((d&(1<<15)) != 0, CMOV) + fs.setIf((d&(1<<23)) != 0, MMX) + fs.setIf((d&(1<<24)) != 0, FXSR) + fs.setIf((d&(1<<25)) != 0, FXSROPT) + fs.setIf((d&(1<<25)) != 0, SSE) + fs.setIf((d&(1<<26)) != 0, SSE2) + fs.setIf((c&1) != 0, SSE3) + fs.setIf((c&(1<<5)) != 0, VMX) + fs.setIf((c&(1<<9)) != 0, SSSE3) + fs.setIf((c&(1<<19)) != 0, SSE4) + fs.setIf((c&(1<<20)) != 0, SSE42) + fs.setIf((c&(1<<25)) != 0, AESNI) + fs.setIf((c&(1<<1)) != 0, CLMUL) + fs.setIf(c&(1<<22) != 0, MOVBE) + fs.setIf(c&(1<<23) != 0, POPCNT) + fs.setIf(c&(1<<30) != 0, RDRAND) + + // This bit has been reserved by Intel & AMD for use by hypervisors, + // and indicates the presence of a hypervisor. + fs.setIf(c&(1<<31) != 0, HYPERVISOR) + fs.setIf(c&(1<<29) != 0, F16C) + fs.setIf(c&(1<<13) != 0, CX16) + + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + fs.setIf(c&1<<26 != 0, XSAVE) + fs.setIf(c&1<<27 != 0, OSXSAVE) + // Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits + const avxCheck = 1<<26 | 1<<27 | 1<<28 + if c&avxCheck == avxCheck { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + fs.set(AVX) + switch vend { + case Intel: + // Older than Haswell. + fs.setIf(family == 6 && model < 60, AVXSLOW) + case AMD: + // Older than Zen 2 + fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW) + } + } + } + // FMA3 can be used with SSE registers, so no OS support is strictly needed. + // fma3 and OSXSAVE needed. + const fma3Check = 1<<12 | 1<<27 + fs.setIf(c&fma3Check == fma3Check, FMA3) + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 { + _, ebx, ecx, edx := cpuidex(7, 0) + if fs.inSet(AVX) && (ebx&0x00000020) != 0 { + fs.set(AVX2) + } + // CPUID.(EAX=7, ECX=0).EBX + if (ebx & 0x00000008) != 0 { + fs.set(BMI1) + fs.setIf((ebx&0x00000100) != 0, BMI2) + } + fs.setIf(ebx&(1<<2) != 0, SGX) + fs.setIf(ebx&(1<<4) != 0, HLE) + fs.setIf(ebx&(1<<9) != 0, ERMS) + fs.setIf(ebx&(1<<11) != 0, RTM) + fs.setIf(ebx&(1<<14) != 0, MPX) + fs.setIf(ebx&(1<<18) != 0, RDSEED) + fs.setIf(ebx&(1<<19) != 0, ADX) + fs.setIf(ebx&(1<<29) != 0, SHA) + + // CPUID.(EAX=7, ECX=0).ECX + fs.setIf(ecx&(1<<5) != 0, WAITPKG) + fs.setIf(ecx&(1<<7) != 0, CETSS) + fs.setIf(ecx&(1<<8) != 0, GFNI) + fs.setIf(ecx&(1<<9) != 0, VAES) + fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) + fs.setIf(ecx&(1<<13) != 0, TME) + fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) + fs.setIf(ecx&(1<<23) != 0, KEYLOCKER) + fs.setIf(ecx&(1<<27) != 0, MOVDIRI) + fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) + fs.setIf(ecx&(1<<29) != 0, ENQCMD) + fs.setIf(ecx&(1<<30) != 0, SGXLC) + + // CPUID.(EAX=7, ECX=0).EDX + fs.setIf(edx&(1<<4) != 0, FSRM) + fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL) + fs.setIf(edx&(1<<10) != 0, MD_CLEAR) + fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) + fs.setIf(edx&(1<<14) != 0, SERIALIZE) + fs.setIf(edx&(1<<15) != 0, HYBRID_CPU) + fs.setIf(edx&(1<<16) != 0, TSXLDTRK) + fs.setIf(edx&(1<<18) != 0, PCONFIG) + fs.setIf(edx&(1<<20) != 0, CETIBT) + fs.setIf(edx&(1<<26) != 0, IBPB) + fs.setIf(edx&(1<<27) != 0, STIBP) + fs.setIf(edx&(1<<28) != 0, FLUSH_L1D) + fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP) + fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP) + fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD) + + // CPUID.(EAX=7, ECX=1).EAX + eax1, _, _, edx1 := cpuidex(7, 1) + fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI) + fs.setIf(eax1&(1<<1) != 0, SM3_X86) + fs.setIf(eax1&(1<<2) != 0, SM4_X86) + fs.setIf(eax1&(1<<7) != 0, CMPCCXADD) + fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL) + fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT) + fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT) + fs.setIf(eax1&(1<<22) != 0, HRESET) + fs.setIf(eax1&(1<<23) != 0, AVXIFMA) + fs.setIf(eax1&(1<<26) != 0, LAM) + + // CPUID.(EAX=7, ECX=1).EDX + fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) + fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<6) != 0, AMXTRANSPOSE) + fs.setIf(edx1&(1<<7) != 0, AMXTF32) + fs.setIf(edx1&(1<<8) != 0, AMXCOMPLEX) + fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) + fs.setIf(edx1&(1<<14) != 0, PREFETCHI) + fs.setIf(edx1&(1<<19) != 0, AVX10) + fs.setIf(edx1&(1<<21) != 0, APX_F) + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3 + if runtime.GOOS == "darwin" { + hasAVX512 = fs.inSet(AVX) && darwinHasAVX512() + } + if hasAVX512 { + fs.setIf(ebx&(1<<16) != 0, AVX512F) + fs.setIf(ebx&(1<<17) != 0, AVX512DQ) + fs.setIf(ebx&(1<<21) != 0, AVX512IFMA) + fs.setIf(ebx&(1<<26) != 0, AVX512PF) + fs.setIf(ebx&(1<<27) != 0, AVX512ER) + fs.setIf(ebx&(1<<28) != 0, AVX512CD) + fs.setIf(ebx&(1<<30) != 0, AVX512BW) + fs.setIf(ebx&(1<<31) != 0, AVX512VL) + // ecx + fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<3) != 0, AMXFP8) + fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) + fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) + fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) + fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ) + // edx + fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT) + fs.setIf(edx&(1<<22) != 0, AMXBF16) + fs.setIf(edx&(1<<23) != 0, AVX512FP16) + fs.setIf(edx&(1<<24) != 0, AMXTILE) + fs.setIf(edx&(1<<25) != 0, AMXINT8) + // eax1 = CPUID.(EAX=7, ECX=1).EAX + fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + fs.setIf(eax1&(1<<19) != 0, WRMSRNS) + fs.setIf(eax1&(1<<21) != 0, AMXFP16) + fs.setIf(eax1&(1<<27) != 0, MSRLIST) + } + } + + // CPUID.(EAX=7, ECX=2) + _, _, _, edx = cpuidex(7, 2) + fs.setIf(edx&(1<<0) != 0, PSFD) + fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL) + fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL) + fs.setIf(edx&(1<<4) != 0, BHI_CTRL) + fs.setIf(edx&(1<<5) != 0, MCDT_NO) + + // Add keylocker features. + if fs.inSet(KEYLOCKER) && mfi >= 0x19 { + _, ebx, _, _ := cpuidex(0x19, 0) + fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4) + } + + // Add AVX10 features. + if fs.inSet(AVX10) && mfi >= 0x24 { + _, ebx, _, _ := cpuidex(0x24, 0) + fs.setIf(ebx&(1<<16) != 0, AVX10_128) + fs.setIf(ebx&(1<<17) != 0, AVX10_256) + fs.setIf(ebx&(1<<18) != 0, AVX10_512) + } + } + + // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) + // EAX + // Bit 00: XSAVEOPT is available. + // Bit 01: Supports XSAVEC and the compacted form of XRSTOR if set. + // Bit 02: Supports XGETBV with ECX = 1 if set. + // Bit 03: Supports XSAVES/XRSTORS and IA32_XSS if set. + // Bits 31 - 04: Reserved. + // EBX + // Bits 31 - 00: The size in bytes of the XSAVE area containing all states enabled by XCRO | IA32_XSS. + // ECX + // Bits 31 - 00: Reports the supported bits of the lower 32 bits of the IA32_XSS MSR. IA32_XSS[n] can be set to 1 only if ECX[n] is 1. + // EDX? + // Bits 07 - 00: Used for XCR0. Bit 08: PT state. Bit 09: Used for XCR0. Bits 12 - 10: Reserved. Bit 13: HWP state. Bits 31 - 14: Reserved. + if mfi >= 0xd { + if fs.inSet(XSAVE) { + eax, _, _, _ := cpuidex(0xd, 1) + fs.setIf(eax&(1<<0) != 0, XSAVEOPT) + fs.setIf(eax&(1<<1) != 0, XSAVEC) + fs.setIf(eax&(1<<2) != 0, XGETBV1) + fs.setIf(eax&(1<<3) != 0, XSAVES) + } + } + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + fs.set(LZCNT) + fs.set(POPCNT) + } + // ECX + fs.setIf((c&(1<<0)) != 0, LAHF) + fs.setIf((c&(1<<2)) != 0, SVM) + fs.setIf((c&(1<<6)) != 0, SSE4A) + fs.setIf((c&(1<<10)) != 0, IBS) + fs.setIf((c&(1<<22)) != 0, TOPEXT) + + // EDX + fs.setIf(d&(1<<11) != 0, SYSCALL) + fs.setIf(d&(1<<20) != 0, NX) + fs.setIf(d&(1<<22) != 0, MMXEXT) + fs.setIf(d&(1<<23) != 0, MMX) + fs.setIf(d&(1<<24) != 0, FXSR) + fs.setIf(d&(1<<25) != 0, FXSROPT) + fs.setIf(d&(1<<27) != 0, RDTSCP) + fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT) + fs.setIf(d&(1<<31) != 0, AMD3DNOW) + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if fs.inSet(AVX) { + fs.setIf((c&(1<<11)) != 0, XOP) + fs.setIf((c&(1<<16)) != 0, FMA4) + } + + } + if maxExtendedFunction() >= 0x80000007 { + _, b, _, d := cpuid(0x80000007) + fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW) + fs.setIf((b&(1<<1)) != 0, SUCCOR) + fs.setIf((b&(1<<2)) != 0, HWA) + fs.setIf((d&(1<<9)) != 0, CPBOOST) + } + + if maxExtendedFunction() >= 0x80000008 { + _, b, _, _ := cpuid(0x80000008) + fs.setIf(b&(1<<28) != 0, PSFD) + fs.setIf(b&(1<<27) != 0, CPPC) + fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD) + fs.setIf(b&(1<<23) != 0, PPIN) + fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED) + fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS) + fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP) + fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED) + fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON) + fs.setIf(b&(1<<15) != 0, STIBP) + fs.setIf(b&(1<<14) != 0, IBRS) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf(b&(1<<12) != 0, IBPB) + fs.setIf((b&(1<<9)) != 0, WBNOINVD) + fs.setIf((b&(1<<8)) != 0, MCOMMIT) + fs.setIf((b&(1<<4)) != 0, RDPRU) + fs.setIf((b&(1<<3)) != 0, INVLPGB) + fs.setIf((b&(1<<1)) != 0, MSRIRC) + fs.setIf((b&(1<<0)) != 0, CLZERO) + } + + if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A { + _, _, _, edx := cpuid(0x8000000A) + fs.setIf((edx>>0)&1 == 1, SVMNP) + fs.setIf((edx>>1)&1 == 1, LBRVIRT) + fs.setIf((edx>>2)&1 == 1, SVML) + fs.setIf((edx>>3)&1 == 1, NRIPS) + fs.setIf((edx>>4)&1 == 1, TSCRATEMSR) + fs.setIf((edx>>5)&1 == 1, VMCBCLEAN) + fs.setIf((edx>>6)&1 == 1, SVMFBASID) + fs.setIf((edx>>7)&1 == 1, SVMDA) + fs.setIf((edx>>10)&1 == 1, SVMPF) + fs.setIf((edx>>12)&1 == 1, SVMPFT) + } + + if maxExtendedFunction() >= 0x8000001a { + eax, _, _, _ := cpuid(0x8000001a) + fs.setIf((eax>>0)&1 == 1, FP128) + fs.setIf((eax>>1)&1 == 1, MOVU) + fs.setIf((eax>>2)&1 == 1, FP256) + } + + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { + eax, _, _, _ := cpuid(0x8000001b) + fs.setIf((eax>>0)&1 == 1, IBSFFV) + fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM) + fs.setIf((eax>>2)&1 == 1, IBSOPSAM) + fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT) + fs.setIf((eax>>4)&1 == 1, IBSOPCNT) + fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) + fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) + fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) + fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE) + fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX) + fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1. + fs.setIf((eax>>11)&1 == 1, IBS_ZEN4) + } + + if maxExtendedFunction() >= 0x8000001f && vend == AMD { + a, _, _, _ := cpuid(0x8000001f) + fs.setIf((a>>0)&1 == 1, SME) + fs.setIf((a>>1)&1 == 1, SEV) + fs.setIf((a>>2)&1 == 1, MSR_PAGEFLUSH) + fs.setIf((a>>3)&1 == 1, SEV_ES) + fs.setIf((a>>4)&1 == 1, SEV_SNP) + fs.setIf((a>>5)&1 == 1, VMPL) + fs.setIf((a>>10)&1 == 1, SME_COHERENT) + fs.setIf((a>>11)&1 == 1, SEV_64BIT) + fs.setIf((a>>12)&1 == 1, SEV_RESTRICTED) + fs.setIf((a>>13)&1 == 1, SEV_ALTERNATIVE) + fs.setIf((a>>14)&1 == 1, SEV_DEBUGSWAP) + fs.setIf((a>>15)&1 == 1, IBS_PREVENTHOST) + fs.setIf((a>>16)&1 == 1, VTE) + fs.setIf((a>>24)&1 == 1, VMSA_REGPROT) + } + + if maxExtendedFunction() >= 0x80000021 && vend == AMD { + a, _, _, _ := cpuid(0x80000021) + fs.setIf((a>>31)&1 == 1, SRSO_MSR_FIX) + fs.setIf((a>>30)&1 == 1, SRSO_USER_KERNEL_NO) + fs.setIf((a>>29)&1 == 1, SRSO_NO) + fs.setIf((a>>28)&1 == 1, IBPB_BRTYPE) + fs.setIf((a>>27)&1 == 1, SBPB) + } + + if mfi >= 0x20 { + // Microsoft has decided to purposefully hide the information + // of the guest TEE when VMs are being created using Hyper-V. + // + // This leads us to check for the Hyper-V cpuid features + // (0x4000000C), and then for the `ebx` value set. + // + // For Intel TDX, `ebx` is set as `0xbe3`, being 3 the part + // we're mostly interested about,according to: + // https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174 + _, ebx, _, _ := cpuid(0x4000000C) + fs.setIf(ebx == 0xbe3, TDX_GUEST) + } + + if mfi >= 0x21 { + // Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21). + _, ebx, ecx, edx := cpuid(0x21) + identity := string(valAsString(ebx, edx, ecx)) + fs.setIf(identity == "IntelTDX ", TDX_GUEST) + } + + return fs +} + +func (c *CPUInfo) supportAVX10() uint8 { + if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) { + _, ebx, _, _ := cpuidex(0x24, 0) + return uint8(ebx) + } + return 0 +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s new file mode 100644 index 000000000000..8587c3a1fc55 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s @@ -0,0 +1,47 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build 386,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0 + MOVL $0, eax+0(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s new file mode 100644 index 000000000000..bc11f8942193 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s @@ -0,0 +1,72 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// From https://go-review.googlesource.com/c/sys/+/285572/ +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0-1 + MOVB $0, ret+0(FP) // default to false + +#ifdef GOOS_darwin // return if not darwin +#ifdef GOARCH_amd64 // return if not amd64 +// These values from: +// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h +#define commpage64_base_address 0x00007fffffe00000 +#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) +#define commpage64_version (commpage64_base_address+0x01E) +#define hasAVX512F 0x0000004000000000 + MOVQ $commpage64_version, BX + MOVW (BX), AX + CMPW AX, $13 // versions < 13 do not support AVX512 + JL no_avx512 + MOVQ $commpage64_cpu_capabilities64, BX + MOVQ (BX), AX + MOVQ $hasAVX512F, CX + ANDQ CX, AX + JZ no_avx512 + MOVB $1, ret+0(FP) + +no_avx512: +#endif +#endif + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s new file mode 100644 index 000000000000..b196f78eb447 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -0,0 +1,36 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build arm64,!gccgo,!noasm,!appengine + +// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt + +// func getMidr +TEXT ·getMidr(SB), 7, $0 + WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */ + MOVD R0, midr+0(FP) + RET + +// func getProcFeatures +TEXT ·getProcFeatures(SB), 7, $0 + WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */ + MOVD R0, procFeatures+0(FP) + RET + +// func getInstAttributes +TEXT ·getInstAttributes(SB), 7, $0 + WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */ + WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */ + MOVD R0, instAttrReg0+0(FP) + MOVD R1, instAttrReg1+8(FP) + RET + +TEXT ·getVectorLength(SB), 7, $0 + WORD $0xd2800002 // mov x2, #0 + WORD $0x04225022 // addvl x2, x2, #1 + WORD $0xd37df042 // lsl x2, x2, #3 + WORD $0xd2800003 // mov x3, #0 + WORD $0x04635023 // addpl x3, x3, #1 + WORD $0xd37df063 // lsl x3, x3, #3 + MOVD R2, vl+0(FP) + MOVD R3, pl+8(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go new file mode 100644 index 000000000000..9ae32d607dc1 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -0,0 +1,250 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !gccgo && !noasm && !appengine +// +build arm64,!gccgo,!noasm,!appengine + +package cpuid + +import "runtime" + +func getMidr() (midr uint64) +func getProcFeatures() (procFeatures uint64) +func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) +func getVectorLength() (vl, pl uint64) + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } +} + +func addInfo(c *CPUInfo, safe bool) { + // Seems to be safe to assume on ARM64 + c.CacheLine = 64 + detectOS(c) + + // ARM64 disabled since it may crash if interrupt is not intercepted by OS. + if safe && !c.Has(ARMCPUID) && runtime.GOOS != "freebsd" { + return + } + midr := getMidr() + + // MIDR_EL1 - Main ID Register + // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | Implementer | [31-24] | y | + // |--------------------------------------------------| + // | Variant | [23-20] | y | + // |--------------------------------------------------| + // | Architecture | [19-16] | y | + // |--------------------------------------------------| + // | PartNum | [15-4] | y | + // |--------------------------------------------------| + // | Revision | [3-0] | y | + // x--------------------------------------------------x + + switch (midr >> 24) & 0xff { + case 0xC0: + c.VendorString = "Ampere Computing" + c.VendorID = Ampere + case 0x41: + c.VendorString = "Arm Limited" + c.VendorID = ARM + case 0x42: + c.VendorString = "Broadcom Corporation" + c.VendorID = Broadcom + case 0x43: + c.VendorString = "Cavium Inc" + c.VendorID = Cavium + case 0x44: + c.VendorString = "Digital Equipment Corporation" + c.VendorID = DEC + case 0x46: + c.VendorString = "Fujitsu Ltd" + c.VendorID = Fujitsu + case 0x49: + c.VendorString = "Infineon Technologies AG" + c.VendorID = Infineon + case 0x4D: + c.VendorString = "Motorola or Freescale Semiconductor Inc" + c.VendorID = Motorola + case 0x4E: + c.VendorString = "NVIDIA Corporation" + c.VendorID = NVIDIA + case 0x50: + c.VendorString = "Applied Micro Circuits Corporation" + c.VendorID = AMCC + case 0x51: + c.VendorString = "Qualcomm Inc" + c.VendorID = Qualcomm + case 0x56: + c.VendorString = "Marvell International Ltd" + c.VendorID = Marvell + case 0x69: + c.VendorString = "Intel Corporation" + c.VendorID = Intel + } + + // Lower 4 bits: Architecture + // Architecture Meaning + // 0b0001 Armv4. + // 0b0010 Armv4T. + // 0b0011 Armv5 (obsolete). + // 0b0100 Armv5T. + // 0b0101 Armv5TE. + // 0b0110 Armv5TEJ. + // 0b0111 Armv6. + // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'. + // Upper 4 bit: Variant + // An IMPLEMENTATION DEFINED variant number. + // Typically, this field is used to distinguish between different product variants, or major revisions of a product. + c.Family = int(midr>>16) & 0xff + + // PartNum, bits [15:4] + // An IMPLEMENTATION DEFINED primary part number for the device. + // On processors implemented by Arm, if the top four bits of the primary + // part number are 0x0 or 0x7, the variant and architecture are encoded differently. + // Revision, bits [3:0] + // An IMPLEMENTATION DEFINED revision number for the device. + c.Model = int(midr) & 0xffff + + procFeatures := getProcFeatures() + + // ID_AA64PFR0_EL1 - Processor Feature Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | DIT | [51-48] | y | + // |--------------------------------------------------| + // | SVE | [35-32] | y | + // |--------------------------------------------------| + // | GIC | [27-24] | n | + // |--------------------------------------------------| + // | AdvSIMD | [23-20] | y | + // |--------------------------------------------------| + // | FP | [19-16] | y | + // |--------------------------------------------------| + // | EL3 | [15-12] | n | + // |--------------------------------------------------| + // | EL2 | [11-8] | n | + // |--------------------------------------------------| + // | EL1 | [7-4] | n | + // |--------------------------------------------------| + // | EL0 | [3-0] | n | + // x--------------------------------------------------x + + var f flagSet + // if procFeatures&(0xf<<48) != 0 { + // fmt.Println("DIT") + // } + f.setIf(procFeatures&(0xf<<32) != 0, SVE) + if procFeatures&(0xf<<20) != 15<<20 { + f.set(ASIMD) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 + // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. + f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP) + } + f.setIf(procFeatures&(0xf<<16) != 0, FP) + + instAttrReg0, instAttrReg1 := getInstAttributes() + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // + // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | RNDR | [63-60] | y | + // |--------------------------------------------------| + // | TLB | [59-56] | y | + // |--------------------------------------------------| + // | TS | [55-52] | y | + // |--------------------------------------------------| + // | FHM | [51-48] | y | + // |--------------------------------------------------| + // | DP | [47-44] | y | + // |--------------------------------------------------| + // | SM4 | [43-40] | y | + // |--------------------------------------------------| + // | SM3 | [39-36] | y | + // |--------------------------------------------------| + // | SHA3 | [35-32] | y | + // |--------------------------------------------------| + // | RDM | [31-28] | y | + // |--------------------------------------------------| + // | ATOMICS | [23-20] | y | + // |--------------------------------------------------| + // | CRC32 | [19-16] | y | + // |--------------------------------------------------| + // | SHA2 | [15-12] | y | + // |--------------------------------------------------| + // | SHA1 | [11-8] | y | + // |--------------------------------------------------| + // | AES | [7-4] | y | + // x--------------------------------------------------x + + f.setIf(instAttrReg0&(0xf<<60) != 0, RNDR) + f.setIf(instAttrReg0&(0xf<<56) != 0, TLB) + f.setIf(instAttrReg0&(0xf<<52) != 0, TS) + f.setIf(instAttrReg0&(0xf<<48) != 0, FHM) + f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) + f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) + f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) + f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3) + f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM) + f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS) + f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32) + f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented. + f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512) + f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1) + f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities. + f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL) + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1 + // + // ID_AA64ISAR1_EL1 - Instruction set attribute register 1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | GPI | [31-28] | y | + // |--------------------------------------------------| + // | GPA | [27-24] | y | + // |--------------------------------------------------| + // | LRCPC | [23-20] | y | + // |--------------------------------------------------| + // | FCMA | [19-16] | y | + // |--------------------------------------------------| + // | JSCVT | [15-12] | y | + // |--------------------------------------------------| + // | API | [11-8] | y | + // |--------------------------------------------------| + // | APA | [7-4] | y | + // |--------------------------------------------------| + // | DPB | [3-0] | y | + // x--------------------------------------------------x + + // if instAttrReg1&(0xf<<28) != 0 { + // fmt.Println("GPI") + // } + f.setIf(instAttrReg1&(0xf<<28) != 24, GPA) + f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC) + f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA) + f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT) + // if instAttrReg1&(0xf<<8) != 0 { + // fmt.Println("API") + // } + // if instAttrReg1&(0xf<<4) != 0 { + // fmt.Println("APA") + // } + f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP) + + // Store + c.featureSet.or(f) +} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go new file mode 100644 index 000000000000..574f9389c07e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine +// +build !amd64,!386,!arm64 gccgo noasm appengine + +package cpuid + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } + +} + +func addInfo(info *CPUInfo, safe bool) {} +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go new file mode 100644 index 000000000000..f924c9d8399a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -0,0 +1,41 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine) +// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +func asmDarwinHasAVX512() bool + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm + darwinHasAVX512 = asmDarwinHasAVX512 +} + +func addInfo(c *CPUInfo, safe bool) { + c.maxFunc = maxFunctionID() + c.maxExFunc = maxExtendedFunction() + c.BrandName = brandName() + c.CacheLine = cacheLine() + c.Family, c.Model, c.Stepping = familyModel() + c.featureSet = support() + c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV)) + c.ThreadsPerCore = threadsPerCore() + c.LogicalCores = logicalCores() + c.PhysicalCores = physicalCores() + c.VendorID, c.VendorString = vendorID() + c.HypervisorVendorID, c.HypervisorVendorString = hypervisorVendorID() + c.AVX10Level = c.supportAVX10() + c.cacheSize() + c.frequencies() +} + +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go new file mode 100644 index 000000000000..07704351faf9 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -0,0 +1,300 @@ +// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT. + +package cpuid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ADX-1] + _ = x[AESNI-2] + _ = x[AMD3DNOW-3] + _ = x[AMD3DNOWEXT-4] + _ = x[AMXBF16-5] + _ = x[AMXFP16-6] + _ = x[AMXINT8-7] + _ = x[AMXFP8-8] + _ = x[AMXTILE-9] + _ = x[AMXTF32-10] + _ = x[AMXCOMPLEX-11] + _ = x[AMXTRANSPOSE-12] + _ = x[APX_F-13] + _ = x[AVX-14] + _ = x[AVX10-15] + _ = x[AVX10_128-16] + _ = x[AVX10_256-17] + _ = x[AVX10_512-18] + _ = x[AVX2-19] + _ = x[AVX512BF16-20] + _ = x[AVX512BITALG-21] + _ = x[AVX512BW-22] + _ = x[AVX512CD-23] + _ = x[AVX512DQ-24] + _ = x[AVX512ER-25] + _ = x[AVX512F-26] + _ = x[AVX512FP16-27] + _ = x[AVX512IFMA-28] + _ = x[AVX512PF-29] + _ = x[AVX512VBMI-30] + _ = x[AVX512VBMI2-31] + _ = x[AVX512VL-32] + _ = x[AVX512VNNI-33] + _ = x[AVX512VP2INTERSECT-34] + _ = x[AVX512VPOPCNTDQ-35] + _ = x[AVXIFMA-36] + _ = x[AVXNECONVERT-37] + _ = x[AVXSLOW-38] + _ = x[AVXVNNI-39] + _ = x[AVXVNNIINT8-40] + _ = x[AVXVNNIINT16-41] + _ = x[BHI_CTRL-42] + _ = x[BMI1-43] + _ = x[BMI2-44] + _ = x[CETIBT-45] + _ = x[CETSS-46] + _ = x[CLDEMOTE-47] + _ = x[CLMUL-48] + _ = x[CLZERO-49] + _ = x[CMOV-50] + _ = x[CMPCCXADD-51] + _ = x[CMPSB_SCADBS_SHORT-52] + _ = x[CMPXCHG8-53] + _ = x[CPBOOST-54] + _ = x[CPPC-55] + _ = x[CX16-56] + _ = x[EFER_LMSLE_UNS-57] + _ = x[ENQCMD-58] + _ = x[ERMS-59] + _ = x[F16C-60] + _ = x[FLUSH_L1D-61] + _ = x[FMA3-62] + _ = x[FMA4-63] + _ = x[FP128-64] + _ = x[FP256-65] + _ = x[FSRM-66] + _ = x[FXSR-67] + _ = x[FXSROPT-68] + _ = x[GFNI-69] + _ = x[HLE-70] + _ = x[HRESET-71] + _ = x[HTT-72] + _ = x[HWA-73] + _ = x[HYBRID_CPU-74] + _ = x[HYPERVISOR-75] + _ = x[IA32_ARCH_CAP-76] + _ = x[IA32_CORE_CAP-77] + _ = x[IBPB-78] + _ = x[IBPB_BRTYPE-79] + _ = x[IBRS-80] + _ = x[IBRS_PREFERRED-81] + _ = x[IBRS_PROVIDES_SMP-82] + _ = x[IBS-83] + _ = x[IBSBRNTRGT-84] + _ = x[IBSFETCHSAM-85] + _ = x[IBSFFV-86] + _ = x[IBSOPCNT-87] + _ = x[IBSOPCNTEXT-88] + _ = x[IBSOPSAM-89] + _ = x[IBSRDWROPCNT-90] + _ = x[IBSRIPINVALIDCHK-91] + _ = x[IBS_FETCH_CTLX-92] + _ = x[IBS_OPDATA4-93] + _ = x[IBS_OPFUSE-94] + _ = x[IBS_PREVENTHOST-95] + _ = x[IBS_ZEN4-96] + _ = x[IDPRED_CTRL-97] + _ = x[INT_WBINVD-98] + _ = x[INVLPGB-99] + _ = x[KEYLOCKER-100] + _ = x[KEYLOCKERW-101] + _ = x[LAHF-102] + _ = x[LAM-103] + _ = x[LBRVIRT-104] + _ = x[LZCNT-105] + _ = x[MCAOVERFLOW-106] + _ = x[MCDT_NO-107] + _ = x[MCOMMIT-108] + _ = x[MD_CLEAR-109] + _ = x[MMX-110] + _ = x[MMXEXT-111] + _ = x[MOVBE-112] + _ = x[MOVDIR64B-113] + _ = x[MOVDIRI-114] + _ = x[MOVSB_ZL-115] + _ = x[MOVU-116] + _ = x[MPX-117] + _ = x[MSRIRC-118] + _ = x[MSRLIST-119] + _ = x[MSR_PAGEFLUSH-120] + _ = x[NRIPS-121] + _ = x[NX-122] + _ = x[OSXSAVE-123] + _ = x[PCONFIG-124] + _ = x[POPCNT-125] + _ = x[PPIN-126] + _ = x[PREFETCHI-127] + _ = x[PSFD-128] + _ = x[RDPRU-129] + _ = x[RDRAND-130] + _ = x[RDSEED-131] + _ = x[RDTSCP-132] + _ = x[RRSBA_CTRL-133] + _ = x[RTM-134] + _ = x[RTM_ALWAYS_ABORT-135] + _ = x[SBPB-136] + _ = x[SERIALIZE-137] + _ = x[SEV-138] + _ = x[SEV_64BIT-139] + _ = x[SEV_ALTERNATIVE-140] + _ = x[SEV_DEBUGSWAP-141] + _ = x[SEV_ES-142] + _ = x[SEV_RESTRICTED-143] + _ = x[SEV_SNP-144] + _ = x[SGX-145] + _ = x[SGXLC-146] + _ = x[SHA-147] + _ = x[SME-148] + _ = x[SME_COHERENT-149] + _ = x[SM3_X86-150] + _ = x[SM4_X86-151] + _ = x[SPEC_CTRL_SSBD-152] + _ = x[SRBDS_CTRL-153] + _ = x[SRSO_MSR_FIX-154] + _ = x[SRSO_NO-155] + _ = x[SRSO_USER_KERNEL_NO-156] + _ = x[SSE-157] + _ = x[SSE2-158] + _ = x[SSE3-159] + _ = x[SSE4-160] + _ = x[SSE42-161] + _ = x[SSE4A-162] + _ = x[SSSE3-163] + _ = x[STIBP-164] + _ = x[STIBP_ALWAYSON-165] + _ = x[STOSB_SHORT-166] + _ = x[SUCCOR-167] + _ = x[SVM-168] + _ = x[SVMDA-169] + _ = x[SVMFBASID-170] + _ = x[SVML-171] + _ = x[SVMNP-172] + _ = x[SVMPF-173] + _ = x[SVMPFT-174] + _ = x[SYSCALL-175] + _ = x[SYSEE-176] + _ = x[TBM-177] + _ = x[TDX_GUEST-178] + _ = x[TLB_FLUSH_NESTED-179] + _ = x[TME-180] + _ = x[TOPEXT-181] + _ = x[TSCRATEMSR-182] + _ = x[TSXLDTRK-183] + _ = x[VAES-184] + _ = x[VMCBCLEAN-185] + _ = x[VMPL-186] + _ = x[VMSA_REGPROT-187] + _ = x[VMX-188] + _ = x[VPCLMULQDQ-189] + _ = x[VTE-190] + _ = x[WAITPKG-191] + _ = x[WBNOINVD-192] + _ = x[WRMSRNS-193] + _ = x[X87-194] + _ = x[XGETBV1-195] + _ = x[XOP-196] + _ = x[XSAVE-197] + _ = x[XSAVEC-198] + _ = x[XSAVEOPT-199] + _ = x[XSAVES-200] + _ = x[AESARM-201] + _ = x[ARMCPUID-202] + _ = x[ASIMD-203] + _ = x[ASIMDDP-204] + _ = x[ASIMDHP-205] + _ = x[ASIMDRDM-206] + _ = x[ATOMICS-207] + _ = x[CRC32-208] + _ = x[DCPOP-209] + _ = x[EVTSTRM-210] + _ = x[FCMA-211] + _ = x[FHM-212] + _ = x[FP-213] + _ = x[FPHP-214] + _ = x[GPA-215] + _ = x[JSCVT-216] + _ = x[LRCPC-217] + _ = x[PMULL-218] + _ = x[RNDR-219] + _ = x[TLB-220] + _ = x[TS-221] + _ = x[SHA1-222] + _ = x[SHA2-223] + _ = x[SHA3-224] + _ = x[SHA512-225] + _ = x[SM3-226] + _ = x[SM4-227] + _ = x[SVE-228] + _ = x[lastID-229] + _ = x[firstID-0] +} + +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAMXTRANSPOSEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSM3_X86SM4_X86SPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVElastID" + +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 97, 102, 105, 110, 119, 128, 137, 141, 151, 163, 171, 179, 187, 195, 202, 212, 222, 230, 240, 251, 259, 269, 287, 302, 309, 321, 328, 335, 346, 358, 366, 370, 374, 380, 385, 393, 398, 404, 408, 417, 435, 443, 450, 454, 458, 472, 478, 482, 486, 495, 499, 503, 508, 513, 517, 521, 528, 532, 535, 541, 544, 547, 557, 567, 580, 593, 597, 608, 612, 626, 643, 646, 656, 667, 673, 681, 692, 700, 712, 728, 742, 753, 763, 778, 786, 797, 807, 814, 823, 833, 837, 840, 847, 852, 863, 870, 877, 885, 888, 894, 899, 908, 915, 923, 927, 930, 936, 943, 956, 961, 963, 970, 977, 983, 987, 996, 1000, 1005, 1011, 1017, 1023, 1033, 1036, 1052, 1056, 1065, 1068, 1077, 1092, 1105, 1111, 1125, 1132, 1135, 1140, 1143, 1146, 1158, 1165, 1172, 1186, 1196, 1208, 1215, 1234, 1237, 1241, 1245, 1249, 1254, 1259, 1264, 1269, 1283, 1294, 1300, 1303, 1308, 1317, 1321, 1326, 1331, 1337, 1344, 1349, 1352, 1361, 1377, 1380, 1386, 1396, 1404, 1408, 1417, 1421, 1433, 1436, 1446, 1449, 1456, 1464, 1471, 1474, 1481, 1484, 1489, 1495, 1503, 1509, 1515, 1523, 1528, 1535, 1542, 1550, 1557, 1562, 1567, 1574, 1578, 1581, 1583, 1587, 1590, 1595, 1600, 1605, 1609, 1612, 1614, 1618, 1622, 1626, 1632, 1635, 1638, 1641, 1647} + +func (i FeatureID) String() string { + if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { + return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VendorUnknown-0] + _ = x[Intel-1] + _ = x[AMD-2] + _ = x[VIA-3] + _ = x[Transmeta-4] + _ = x[NSC-5] + _ = x[KVM-6] + _ = x[MSVM-7] + _ = x[VMware-8] + _ = x[XenHVM-9] + _ = x[Bhyve-10] + _ = x[Hygon-11] + _ = x[SiS-12] + _ = x[RDC-13] + _ = x[Ampere-14] + _ = x[ARM-15] + _ = x[Broadcom-16] + _ = x[Cavium-17] + _ = x[DEC-18] + _ = x[Fujitsu-19] + _ = x[Infineon-20] + _ = x[Motorola-21] + _ = x[NVIDIA-22] + _ = x[AMCC-23] + _ = x[Qualcomm-24] + _ = x[Marvell-25] + _ = x[QEMU-26] + _ = x[QNX-27] + _ = x[ACRN-28] + _ = x[SRE-29] + _ = x[Apple-30] + _ = x[lastVendor-31] +} + +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvellQEMUQNXACRNSREApplelastVendor" + +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 149, 152, 156, 159, 164, 174} + +func (i Vendor) String() string { + if i < 0 || i >= Vendor(len(_Vendor_index)-1) { + return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]] +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go new file mode 100644 index 000000000000..da07522e7cba --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -0,0 +1,129 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +package cpuid + +import ( + "runtime" + "strings" + + "golang.org/x/sys/unix" +) + +func detectOS(c *CPUInfo) bool { + if runtime.GOOS != "ios" { + tryToFillCPUInfoFomSysctl(c) + } + // There are no hw.optional sysctl values for the below features on Mac OS 11.0 + // to detect their supported state dynamically. Assume the CPU features that + // Apple Silicon M1 supports to be available as a minimal set of features + // to all Go programs running on darwin/arm64. + // TODO: Add more if we know them. + c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) + + return true +} + +func sysctlGetBool(name string) bool { + value, err := unix.SysctlUint32(name) + if err != nil { + return false + } + return value != 0 +} + +func sysctlGetString(name string) string { + value, err := unix.Sysctl(name) + if err != nil { + return "" + } + return value +} + +func sysctlGetInt(unknown int, names ...string) int { + for _, name := range names { + value, err := unix.SysctlUint32(name) + if err != nil { + continue + } + if value != 0 { + return int(value) + } + } + return unknown +} + +func sysctlGetInt64(unknown int, names ...string) int { + for _, name := range names { + value64, err := unix.SysctlUint64(name) + if err != nil { + continue + } + if int(value64) != unknown { + return int(value64) + } + } + return unknown +} + +func setFeature(c *CPUInfo, feature FeatureID, aliases ...string) { + for _, alias := range aliases { + set := sysctlGetBool(alias) + c.featureSet.setIf(set, feature) + if set { + break + } + } +} + +func tryToFillCPUInfoFomSysctl(c *CPUInfo) { + c.BrandName = sysctlGetString("machdep.cpu.brand_string") + + if len(c.BrandName) != 0 { + c.VendorString = strings.Fields(c.BrandName)[0] + } + + c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu") + c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") / + sysctlGetInt(1, "hw.physicalcpu") + c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count") + c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily") + c.Model = sysctlGetInt(0, "machdep.cpu.model") + c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize") + c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize") + c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize") + c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize") + c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize") + + // ARM features: + // + // Note: On some Apple Silicon system, some feats have aliases. See: + // https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics + // When so, we look at all aliases and consider a feature available when at least one identifier matches. + setFeature(c, AESARM, "hw.optional.arm.FEAT_AES") // AES instructions + setFeature(c, ASIMD, "hw.optional.arm.AdvSIMD", "hw.optional.neon") // Advanced SIMD + setFeature(c, ASIMDDP, "hw.optional.arm.FEAT_DotProd") // SIMD Dot Product + setFeature(c, ASIMDHP, "hw.optional.arm.AdvSIMD_HPFPCvt", "hw.optional.neon_hpfp") // Advanced SIMD half-precision floating point + setFeature(c, ASIMDRDM, "hw.optional.arm.FEAT_RDM") // Rounding Double Multiply Accumulate/Subtract + setFeature(c, ATOMICS, "hw.optional.arm.FEAT_LSE", "hw.optional.armv8_1_atomics") // Large System Extensions (LSE) + setFeature(c, CRC32, "hw.optional.arm.FEAT_CRC32", "hw.optional.armv8_crc32") // CRC32/CRC32C instructions + setFeature(c, DCPOP, "hw.optional.arm.FEAT_DPB") // Data cache clean to Point of Persistence (DC CVAP) + setFeature(c, EVTSTRM, "hw.optional.arm.FEAT_ECV") // Generic timer + setFeature(c, FCMA, "hw.optional.arm.FEAT_FCMA", "hw.optional.armv8_3_compnum") // Floating point complex number addition and multiplication + setFeature(c, FHM, "hw.optional.armv8_2_fhm", "hw.optional.arm.FEAT_FHM") // FMLAL and FMLSL instructions + setFeature(c, FP, "hw.optional.floatingpoint") // Single-precision and double-precision floating point + setFeature(c, FPHP, "hw.optional.arm.FEAT_FP16", "hw.optional.neon_fp16") // Half-precision floating point + setFeature(c, GPA, "hw.optional.arm.FEAT_PAuth") // Generic Pointer Authentication + setFeature(c, JSCVT, "hw.optional.arm.FEAT_JSCVT") // Javascript-style double->int convert (FJCVTZS) + setFeature(c, LRCPC, "hw.optional.arm.FEAT_LRCPC") // Weaker release consistency (LDAPR, etc) + setFeature(c, PMULL, "hw.optional.arm.FEAT_PMULL") // Polynomial Multiply instructions (PMULL/PMULL2) + setFeature(c, RNDR, "hw.optional.arm.FEAT_RNG") // Random Number instructions + setFeature(c, TLB, "hw.optional.arm.FEAT_TLBIOS", "hw.optional.arm.FEAT_TLBIRANGE") // Outer Shareable and TLB range maintenance instructions + setFeature(c, TS, "hw.optional.arm.FEAT_FlagM", "hw.optional.arm.FEAT_FlagM2") // Flag manipulation instructions + setFeature(c, SHA1, "hw.optional.arm.FEAT_SHA1") // SHA-1 instructions (SHA1C, etc) + setFeature(c, SHA2, "hw.optional.arm.FEAT_SHA256") // SHA-2 instructions (SHA256H, etc) + setFeature(c, SHA3, "hw.optional.arm.FEAT_SHA3") // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) + setFeature(c, SHA512, "hw.optional.arm.FEAT_SHA512") // SHA512 instructions + setFeature(c, SM3, "hw.optional.arm.FEAT_SM3") // SM3 instructions + setFeature(c, SM4, "hw.optional.arm.FEAT_SM4") // SM4 instructions + setFeature(c, SVE, "hw.optional.arm.FEAT_SVE") // Scalable Vector Extension +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go new file mode 100644 index 000000000000..d96d24438b3e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go @@ -0,0 +1,208 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file located +// here https://github.com/golang/sys/blob/master/LICENSE + +package cpuid + +import ( + "encoding/binary" + "io/ioutil" + "runtime" +) + +// HWCAP bits. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + hwcap_USCAT = 1 << 25 + hwcap_ILRCPC = 1 << 26 + hwcap_FLAGM = 1 << 27 + hwcap_SSBS = 1 << 28 + hwcap_SB = 1 << 29 + hwcap_PACA = 1 << 30 + hwcap_PACG = 1 << 31 + hwcap_GCS = 1 << 32 + + hwcap2_DCPODP = 1 << 0 + hwcap2_SVE2 = 1 << 1 + hwcap2_SVEAES = 1 << 2 + hwcap2_SVEPMULL = 1 << 3 + hwcap2_SVEBITPERM = 1 << 4 + hwcap2_SVESHA3 = 1 << 5 + hwcap2_SVESM4 = 1 << 6 + hwcap2_FLAGM2 = 1 << 7 + hwcap2_FRINT = 1 << 8 + hwcap2_SVEI8MM = 1 << 9 + hwcap2_SVEF32MM = 1 << 10 + hwcap2_SVEF64MM = 1 << 11 + hwcap2_SVEBF16 = 1 << 12 + hwcap2_I8MM = 1 << 13 + hwcap2_BF16 = 1 << 14 + hwcap2_DGH = 1 << 15 + hwcap2_RNG = 1 << 16 + hwcap2_BTI = 1 << 17 + hwcap2_MTE = 1 << 18 + hwcap2_ECV = 1 << 19 + hwcap2_AFP = 1 << 20 + hwcap2_RPRES = 1 << 21 + hwcap2_MTE3 = 1 << 22 + hwcap2_SME = 1 << 23 + hwcap2_SME_I16I64 = 1 << 24 + hwcap2_SME_F64F64 = 1 << 25 + hwcap2_SME_I8I32 = 1 << 26 + hwcap2_SME_F16F32 = 1 << 27 + hwcap2_SME_B16F32 = 1 << 28 + hwcap2_SME_F32F32 = 1 << 29 + hwcap2_SME_FA64 = 1 << 30 + hwcap2_WFXT = 1 << 31 + hwcap2_EBF16 = 1 << 32 + hwcap2_SVE_EBF16 = 1 << 33 + hwcap2_CSSC = 1 << 34 + hwcap2_RPRFM = 1 << 35 + hwcap2_SVE2P1 = 1 << 36 + hwcap2_SME2 = 1 << 37 + hwcap2_SME2P1 = 1 << 38 + hwcap2_SME_I16I32 = 1 << 39 + hwcap2_SME_BI32I32 = 1 << 40 + hwcap2_SME_B16B16 = 1 << 41 + hwcap2_SME_F16F16 = 1 << 42 + hwcap2_MOPS = 1 << 43 + hwcap2_HBC = 1 << 44 + hwcap2_SVE_B16B16 = 1 << 45 + hwcap2_LRCPC3 = 1 << 46 + hwcap2_LSE128 = 1 << 47 + hwcap2_FPMR = 1 << 48 + hwcap2_LUT = 1 << 49 + hwcap2_FAMINMAX = 1 << 50 + hwcap2_F8CVT = 1 << 51 + hwcap2_F8FMA = 1 << 52 + hwcap2_F8DP4 = 1 << 53 + hwcap2_F8DP2 = 1 << 54 + hwcap2_F8E4M3 = 1 << 55 + hwcap2_F8E5M2 = 1 << 56 + hwcap2_SME_LUTV2 = 1 << 57 + hwcap2_SME_F8F16 = 1 << 58 + hwcap2_SME_F8F32 = 1 << 59 + hwcap2_SME_SF8FMA = 1 << 60 + hwcap2_SME_SF8DP4 = 1 << 61 + hwcap2_SME_SF8DP2 = 1 << 62 + hwcap2_POE = 1 << 63 +) + +func detectOS(c *CPUInfo) bool { + // For now assuming no hyperthreading is reasonable. + c.LogicalCores = runtime.NumCPU() + c.PhysicalCores = c.LogicalCores + c.ThreadsPerCore = 1 + if hwcap == 0 { + // We did not get values from the runtime. + // Try reading /proc/self/auxv + + // From https://github.com/golang/sys + const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + uintSize = int(32 << (^uint(0) >> 63)) + ) + + buf, err := ioutil.ReadFile("/proc/self/auxv") + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return false + } + bo := binary.LittleEndian + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwcap = val + case _AT_HWCAP2: + // Not used + } + } + if hwcap == 0 { + return false + } + } + + // HWCap was populated by the runtime from the auxiliary vector. + // Use HWCap information since reading aarch64 system registers + // is not supported in user space on older linux kernels. + c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM) + c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID) + c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32) + c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP) + c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) + c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDFHM), FHM) + c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) + c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) + c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) + c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) + c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) + c.featureSet.setIf(isSet(hwcap, hwcap2_RNG), RNDR) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TLB) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TS) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512) + c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3) + c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4) + c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE) + + // The Samsung S9+ kernel reports support for atomics, but not all cores + // actually support them, resulting in SIGILL. See issue #28431. + // TODO(elias.naur): Only disable the optimization on bad chipsets on android. + c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS) + + return true +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go new file mode 100644 index 000000000000..8733ba343638 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !linux && !darwin +// +build arm64,!linux,!darwin + +package cpuid + +import "runtime" + +func detectOS(c *CPUInfo) bool { + c.PhysicalCores = runtime.NumCPU() + // For now assuming 1 thread per core... + c.ThreadsPerCore = 1 + c.LogicalCores = c.PhysicalCores + return false +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go new file mode 100644 index 000000000000..f8f201b5f7bb --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go @@ -0,0 +1,8 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//go:build nounsafe +// +build nounsafe + +package cpuid + +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go new file mode 100644 index 000000000000..92af622eb8ca --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//go:build !nounsafe +// +build !nounsafe + +package cpuid + +import _ "unsafe" // needed for go:linkname + +//go:linkname hwcap internal/cpu.HWCap +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh new file mode 100644 index 000000000000..471d986d2488 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +go tool dist list | while IFS=/ read os arch; do + echo "Checking $os/$arch..." + echo " normal" + GOARCH=$arch GOOS=$os go build -o /dev/null . + echo " noasm" + GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null . + echo " appengine" + GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null . + echo " noasm,appengine" + GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null . +done diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE new file mode 100644 index 000000000000..74487567632c --- /dev/null +++ b/vendor/github.com/klauspost/crc32/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md new file mode 100644 index 000000000000..febeba5ebeb9 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/README.md @@ -0,0 +1,42 @@ +# 2025 revival + +For IEEE checksums AVX512 can be used to speed up CRC32 checksums by approximately 2x. + +Castagnoli checksums (CRC32C) can also be computer with AVX512, +but the performance gain is not as significant enough for the downsides of using it at this point. + +# crc32 + +This package is a drop-in replacement for the standard library `hash/crc32` package, +that features AVX 512 optimizations on x64 platforms, for a 2x speedup for IEEE CRC32 checksums. + +# usage + +Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.24 + +Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. + +# changes +* 2025: Revived and updated to Go 1.24, with AVX 512 optimizations. + +# performance + +AVX512 are enabled above 1KB input size. This rather high limit is due to AVX512 may be slower to ramp up than +the regular SSE4 implementation for smaller inputs. This is not reflected in the benchmarks below. + +| Benchmark | Old MB/s | New MB/s | Speedup | +|-----------------------------------------------|----------|----------|---------| +| BenchmarkCRC32/poly=IEEE/size=512/align=0-32 | 17996.39 | 17969.94 | 1.00x | +| BenchmarkCRC32/poly=IEEE/size=512/align=1-32 | 18021.48 | 17945.55 | 1.00x | +| BenchmarkCRC32/poly=IEEE/size=1kB/align=0-32 | 19921.70 | 45613.77 | 2.29x | +| BenchmarkCRC32/poly=IEEE/size=1kB/align=1-32 | 19946.60 | 46819.09 | 2.35x | +| BenchmarkCRC32/poly=IEEE/size=4kB/align=0-32 | 21538.65 | 48600.93 | 2.26x | +| BenchmarkCRC32/poly=IEEE/size=4kB/align=1-32 | 21449.20 | 48477.84 | 2.26x | +| BenchmarkCRC32/poly=IEEE/size=32kB/align=0-32 | 21785.49 | 46013.10 | 2.11x | +| BenchmarkCRC32/poly=IEEE/size=32kB/align=1-32 | 21946.47 | 45954.10 | 2.09x | + +cpu: AMD Ryzen 9 9950X 16-Core Processor + +# license + +Standard Go license. See [LICENSE](LICENSE) for details. diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go new file mode 100644 index 000000000000..1de0bb3a478c --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32.go @@ -0,0 +1,253 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, +// checksum. See https://en.wikipedia.org/wiki/Cyclic_redundancy_check for +// information. +// +// Polynomials are represented in LSB-first form also known as reversed representation. +// +// See https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials +// for information. +package crc32 + +import ( + "encoding/binary" + "errors" + "hash" + "sync" + "sync/atomic" +) + +// The size of a CRC-32 checksum in bytes. +const Size = 4 + +// Predefined polynomials. +const ( + // IEEE is by far and away the most common CRC-32 polynomial. + // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... + IEEE = 0xedb88320 + + // Castagnoli's polynomial, used in iSCSI. + // Has better error detection characteristics than IEEE. + // https://dx.doi.org/10.1109/26.231911 + Castagnoli = 0x82f63b78 + + // Koopman's polynomial. + // Also has better error detection characteristics than IEEE. + // https://dx.doi.org/10.1109/DSN.2002.1028931 + Koopman = 0xeb31d82e +) + +// Table is a 256-word table representing the polynomial for efficient processing. +type Table [256]uint32 + +// This file makes use of functions implemented in architecture-specific files. +// The interface that they implement is as follows: +// +// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE +// // algorithm is available. +// archAvailableIEEE() bool +// +// // archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm. +// // It can only be called if archAvailableIEEE() returns true. +// archInitIEEE() +// +// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if +// // archInitIEEE() was previously called. +// archUpdateIEEE(crc uint32, p []byte) uint32 +// +// // archAvailableCastagnoli reports whether an architecture-specific +// // CRC32-C algorithm is available. +// archAvailableCastagnoli() bool +// +// // archInitCastagnoli initializes the architecture-specific CRC32-C +// // algorithm. It can only be called if archAvailableCastagnoli() returns +// // true. +// archInitCastagnoli() +// +// // archUpdateCastagnoli updates the given CRC32-C. It can only be called +// // if archInitCastagnoli() was previously called. +// archUpdateCastagnoli(crc uint32, p []byte) uint32 + +// castagnoliTable points to a lazily initialized Table for the Castagnoli +// polynomial. MakeTable will always return this value when asked to make a +// Castagnoli table so we can compare against it to find when the caller is +// using this polynomial. +var castagnoliTable *Table +var castagnoliTable8 *slicing8Table +var updateCastagnoli func(crc uint32, p []byte) uint32 +var haveCastagnoli atomic.Bool + +var castagnoliInitOnce = sync.OnceFunc(func() { + castagnoliTable = simpleMakeTable(Castagnoli) + + if archAvailableCastagnoli() { + archInitCastagnoli() + updateCastagnoli = archUpdateCastagnoli + } else { + // Initialize the slicing-by-8 table. + castagnoliTable8 = slicingMakeTable(Castagnoli) + updateCastagnoli = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, castagnoliTable8, p) + } + } + + haveCastagnoli.Store(true) +}) + +// IEEETable is the table for the [IEEE] polynomial. +var IEEETable = simpleMakeTable(IEEE) + +// ieeeTable8 is the slicing8Table for IEEE +var ieeeTable8 *slicing8Table +var updateIEEE func(crc uint32, p []byte) uint32 + +var ieeeInitOnce = sync.OnceFunc(func() { + if archAvailableIEEE() { + archInitIEEE() + updateIEEE = archUpdateIEEE + } else { + // Initialize the slicing-by-8 table. + ieeeTable8 = slicingMakeTable(IEEE) + updateIEEE = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, ieeeTable8, p) + } + } +}) + +// MakeTable returns a [Table] constructed from the specified polynomial. +// The contents of this [Table] must not be modified. +func MakeTable(poly uint32) *Table { + switch poly { + case IEEE: + ieeeInitOnce() + return IEEETable + case Castagnoli: + castagnoliInitOnce() + return castagnoliTable + default: + return simpleMakeTable(poly) + } +} + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint32 + tab *Table +} + +// New creates a new [hash.Hash32] computing the CRC-32 checksum using the +// polynomial represented by the [Table]. Its Sum method will lay the +// value out in big-endian byte order. The returned Hash32 also +// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to +// marshal and unmarshal the internal state of the hash. +func New(tab *Table) hash.Hash32 { + if tab == IEEETable { + ieeeInitOnce() + } + return &digest{0, tab} +} + +// NewIEEE creates a new [hash.Hash32] computing the CRC-32 checksum using +// the [IEEE] polynomial. Its Sum method will lay the value out in +// big-endian byte order. The returned Hash32 also implements +// [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to marshal +// and unmarshal the internal state of the hash. +func NewIEEE() hash.Hash32 { return New(IEEETable) } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +const ( + magic = "crc\x01" + marshaledSize = len(magic) + 4 + 4 +) + +func (d *digest) AppendBinary(b []byte) ([]byte, error) { + b = append(b, magic...) + b = binary.BigEndian.AppendUint32(b, tableSum(d.tab)) + b = binary.BigEndian.AppendUint32(b, d.crc) + return b, nil +} + +func (d *digest) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) + +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("hash/crc32: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("hash/crc32: invalid hash state size") + } + if tableSum(d.tab) != binary.BigEndian.Uint32(b[4:]) { + return errors.New("hash/crc32: tables do not match") + } + d.crc = binary.BigEndian.Uint32(b[8:]) + return nil +} + +func update(crc uint32, tab *Table, p []byte, checkInitIEEE bool) uint32 { + switch { + case haveCastagnoli.Load() && tab == castagnoliTable: + return updateCastagnoli(crc, p) + case tab == IEEETable: + if checkInitIEEE { + ieeeInitOnce() + } + return updateIEEE(crc, p) + default: + return simpleUpdate(crc, tab, p) + } +} + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint32, tab *Table, p []byte) uint32 { + // Unfortunately, because IEEETable is exported, IEEE may be used without a + // call to MakeTable. We have to make sure it gets initialized in that case. + return update(crc, tab, p, true) +} + +func (d *digest) Write(p []byte) (n int, err error) { + // We only create digest objects through New() which takes care of + // initialization in this case. + d.crc = update(d.crc, d.tab, p, false) + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-32 checksum of data +// using the polynomial represented by the [Table]. +func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } + +// ChecksumIEEE returns the CRC-32 checksum of data +// using the [IEEE] polynomial. +func ChecksumIEEE(data []byte) uint32 { + ieeeInitOnce() + return updateIEEE(0, data) +} + +// tableSum returns the IEEE checksum of table t. +func tableSum(t *Table) uint32 { + var a [1024]byte + b := a[:0] + if t != nil { + for _, x := range t { + b = binary.BigEndian.AppendUint32(b, x) + } + } + return ChecksumIEEE(b) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go new file mode 100644 index 000000000000..c6d30b25c923 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.go @@ -0,0 +1,253 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import ( + "unsafe" + + "golang.org/x/sys/cpu" +) + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// and IEEE CRC. + +// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE 4.2 CRC32 +// instruction. +// +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE 4.2 CRC32 +// instruction. +// +//go:noescape +func castagnoliSSE42Triple( + crcA, crcB, crcC uint32, + a, b, c []byte, + rounds uint32, +) (retA uint32, retB uint32, retC uint32) + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. +// +//go:noescape +func ieeeCLMUL(crc uint32, p []byte) uint32 + +// castagnoliCLMULAvx512 is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. +// +//go:noescape +func castagnoliCLMULAvx512(crc uint32, p []byte) uint32 + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. +// +//go:noescape +func ieeeCLMULAvx512(crc uint32, p []byte) uint32 + +const castagnoliK1 = 168 +const castagnoliK2 = 1344 + +type sse42Table [4]Table + +var castagnoliSSE42TableK1 *sse42Table +var castagnoliSSE42TableK2 *sse42Table + +func archAvailableCastagnoli() bool { + return cpu.X86.HasSSE42 +} + +func archInitCastagnoli() { + if !cpu.X86.HasSSE42 { + panic("arch-specific Castagnoli not available") + } + castagnoliSSE42TableK1 = new(sse42Table) + castagnoliSSE42TableK2 = new(sse42Table) + // See description in updateCastagnoli. + // t[0][i] = CRC(i000, O) + // t[1][i] = CRC(0i00, O) + // t[2][i] = CRC(00i0, O) + // t[3][i] = CRC(000i, O) + // where O is a sequence of K zeros. + var tmp [castagnoliK2]byte + for b := 0; b < 4; b++ { + for i := 0; i < 256; i++ { + val := uint32(i) << uint32(b*8) + castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) + castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) + } + } +} + +// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the +// table given) with the given initial crc value. This corresponds to +// CRC(crc, O) in the description in updateCastagnoli. +func castagnoliShift(table *sse42Table, crc uint32) uint32 { + return table[3][crc>>24] ^ + table[2][(crc>>16)&0xFF] ^ + table[1][(crc>>8)&0xFF] ^ + table[0][crc&0xFF] +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !cpu.X86.HasSSE42 { + panic("not available") + } + + // This method is inspired from the algorithm in Intel's white paper: + // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" + // The same strategy of splitting the buffer in three is used but the + // combining calculation is different; the complete derivation is explained + // below. + // + // -- The basic idea -- + // + // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a + // time. In recent Intel architectures the instruction takes 3 cycles; + // however the processor can pipeline up to three instructions if they + // don't depend on each other. + // + // Roughly this means that we can process three buffers in about the same + // time we can process one buffer. + // + // The idea is then to split the buffer in three, CRC the three pieces + // separately and then combine the results. + // + // Combining the results requires precomputed tables, so we must choose a + // fixed buffer length to optimize. The longer the length, the faster; but + // only buffers longer than this length will use the optimization. We choose + // two cutoffs and compute tables for both: + // - one around 512: 168*3=504 + // - one around 4KB: 1344*3=4032 + // + // -- The nitty gritty -- + // + // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with + // initial non-inverted CRC I). This function has the following properties: + // (a) CRC(I, AB) = CRC(CRC(I, A), B) + // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) + // + // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of + // K bytes each, where K is a fixed constant. Let O be the sequence of K zero + // bytes. + // + // CRC(I, ABC) = CRC(I, ABO xor C) + // = CRC(I, ABO) xor CRC(0, C) + // = CRC(CRC(I, AB), O) xor CRC(0, C) + // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) + // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) + // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) + // + // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), + // and CRC(0, C) efficiently. We just need to find a way to quickly compute + // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these + // values; since we can't have a 32-bit table, we break it up into four + // 8-bit tables: + // + // CRC(uvwx, O) = CRC(u000, O) xor + // CRC(0v00, O) xor + // CRC(00w0, O) xor + // CRC(000x, O) + // + // We can compute tables corresponding to the four terms for all 8-bit + // values. + + crc = ^crc + + // Disabled, since it is not significantly faster than the SSE 4.2 version, even on Zen 5. + if false && len(p) >= 2048 && cpu.X86.HasAVX512F && cpu.X86.HasAVX512VL && cpu.X86.HasAVX512VPCLMULQDQ && cpu.X86.HasPCLMULQDQ { + left := len(p) & 15 + do := len(p) - left + crc = castagnoliCLMULAvx512(crc, p[:do]) + return ^castagnoliSSE42(crc, p[do:]) + } + + // If a buffer is long enough to use the optimization, process the first few + // bytes to align the buffer to an 8 byte boundary (if necessary). + if len(p) >= castagnoliK1*3 { + delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) + if delta != 0 { + delta = 8 - delta + crc = castagnoliSSE42(crc, p[:delta]) + p = p[delta:] + } + } + + // Process 3*K2 at a time. + for len(p) >= castagnoliK2*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK2:], p[castagnoliK2*2:], + castagnoliK2/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC + p = p[castagnoliK2*3:] + } + + // Process 3*K1 at a time. + for len(p) >= castagnoliK1*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK1:], p[castagnoliK1*2:], + castagnoliK1/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC + p = p[castagnoliK1*3:] + } + + // Use the simple implementation for what's left. + crc = castagnoliSSE42(crc, p) + return ^crc +} + +func archAvailableIEEE() bool { + return cpu.X86.HasPCLMULQDQ && cpu.X86.HasSSE41 +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !cpu.X86.HasPCLMULQDQ || !cpu.X86.HasSSE41 { + panic("not available") + } + + if len(p) >= 64 { + if len(p) >= 1024 && cpu.X86.HasAVX512F && cpu.X86.HasAVX512VL && cpu.X86.HasAVX512VPCLMULQDQ && cpu.X86.HasPCLMULQDQ { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMULAvx512(^crc, p[:do]) + p = p[do:] + } else { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMUL(^crc, p[:do]) + p = p[do:] + } + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s new file mode 100644 index 000000000000..e2de3a5cb684 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.s @@ -0,0 +1,527 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. +// +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + // If there are fewer than 8 bytes to process, skip alignment. + CMPQ CX, $8 + JL less_than_8 + + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + // Process the first few bytes to 8-byte align the input. + + // BX = 8 - BX. We need to process this many bytes to align. + SUBQ $1, BX + XORQ $7, BX + + BTQ $0, BX + JNC align_2 + + CRC32B (SI), AX + DECQ CX + INCQ SI + +align_2: + BTQ $1, BX + JNC align_4 + + CRC32W (SI), AX + + SUBQ $2, CX + ADDQ $2, SI + +align_4: + BTQ $2, BX + JNC aligned + + CRC32L (SI), AX + + SUBQ $4, CX + ADDQ $4, SI + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL less_than_8 + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + BTQ $2, CX + JNC less_than_4 + + CRC32L (SI), AX + ADDQ $4, SI + +less_than_4: + BTQ $1, CX + JNC less_than_2 + + CRC32W (SI), AX + ADDQ $2, SI + +less_than_2: + BTQ $0, CX + JNC done + + CRC32B (SI), AX + +done: + MOVL AX, ret+32(FP) + RET + +// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) +// bytes from each buffer. +// +// func castagnoliSSE42Triple( +// crc1, crc2, crc3 uint32, +// a, b, c []byte, +// rounds uint32, +// ) (retA uint32, retB uint32, retC uint32) +TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 + MOVL crcA+0(FP), AX + MOVL crcB+4(FP), CX + MOVL crcC+8(FP), DX + + MOVQ a+16(FP), R8 // data pointer + MOVQ b+40(FP), R9 // data pointer + MOVQ c+64(FP), R10 // data pointer + + MOVL rounds+88(FP), R11 + +loop: + CRC32Q (R8), AX + CRC32Q (R9), CX + CRC32Q (R10), DX + + CRC32Q 8(R8), AX + CRC32Q 8(R9), CX + CRC32Q 8(R10), DX + + CRC32Q 16(R8), AX + CRC32Q 16(R9), CX + CRC32Q 16(R10), DX + + ADDQ $24, R8 + ADDQ $24, R9 + ADDQ $24, R10 + + DECQ R11 + JNZ loop + + MOVL AX, retA+96(FP) + MOVL CX, retB+100(FP) + MOVL DX, retC+104(FP) + RET + +// CRC32 polynomial data +// +// These constants are lifted from the +// Linux kernel, since they avoid the costly +// PSHUFB 16 byte reversal proposed in the +// original Intel paper. +DATA r2r1<>+0(SB)/8, $0x154442bd4 +DATA r2r1<>+8(SB)/8, $0x1c6e41596 +DATA r4r3<>+0(SB)/8, $0x1751997d0 +DATA r4r3<>+8(SB)/8, $0x0ccaa009e +DATA rupoly<>+0(SB)/8, $0x1db710641 +DATA rupoly<>+8(SB)/8, $0x1f7011641 +DATA r5<>+0(SB)/8, $0x163cd6124 + +GLOBL r2r1<>(SB), RODATA, $16 +GLOBL r4r3<>(SB), RODATA, $16 +GLOBL rupoly<>(SB), RODATA, $16 +GLOBL r5<>(SB), RODATA, $8 + +// Based on https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 64, and must be a multiple of 16. + +// func ieeeCLMUL(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 + MOVL crc+0(FP), X0 // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + MOVOU (SI), X1 + MOVOU 16(SI), X2 + MOVOU 32(SI), X3 + MOVOU 48(SI), X4 + PXOR X0, X1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left + JB remain64 + + MOVOA r2r1<>+0(SB), X0 + +loopback64: + MOVOA X1, X5 + MOVOA X2, X6 + MOVOA X3, X7 + MOVOA X4, X8 + + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0, X0, X2 + PCLMULQDQ $0, X0, X3 + PCLMULQDQ $0, X0, X4 + + // Load next early + MOVOU (SI), X11 + MOVOU 16(SI), X12 + MOVOU 32(SI), X13 + MOVOU 48(SI), X14 + + PCLMULQDQ $0x11, X0, X5 + PCLMULQDQ $0x11, X0, X6 + PCLMULQDQ $0x11, X0, X7 + PCLMULQDQ $0x11, X0, X8 + + PXOR X5, X1 + PXOR X6, X2 + PXOR X7, X3 + PXOR X8, X4 + + PXOR X11, X1 + PXOR X12, X2 + PXOR X13, X3 + PXOR X14, X4 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + MOVOA r4r3<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. + PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupoly<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + PEXTRD $1, X1, AX + MOVL AX, ret+32(FP) + + RET + +DATA r2r1X<>+0(SB)/8, $0x154442bd4 +DATA r2r1X<>+8(SB)/8, $0x1c6e41596 +DATA r2r1X<>+16(SB)/8, $0x154442bd4 +DATA r2r1X<>+24(SB)/8, $0x1c6e41596 +DATA r2r1X<>+32(SB)/8, $0x154442bd4 +DATA r2r1X<>+40(SB)/8, $0x1c6e41596 +DATA r2r1X<>+48(SB)/8, $0x154442bd4 +DATA r2r1X<>+56(SB)/8, $0x1c6e41596 +GLOBL r2r1X<>(SB), RODATA, $64 + +// Based on https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 128, and must be a multiple of 16. + +// func ieeeCLMULAvx512(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMULAvx512(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + VPXORQ Z0, Z0, Z0 + VMOVDQU64 (SI), Z1 + VMOVQ AX, X0 + VPXORQ Z0, Z1, Z1 // Merge initial CRC value into Z1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + + VMOVDQU64 r2r1X<>+0(SB), Z0 + +loopback64: + // Load next early + VMOVDQU64 (SI), Z11 + + VPCLMULQDQ $0x11, Z0, Z1, Z5 + VPCLMULQDQ $0, Z0, Z1, Z1 + + VPTERNLOGD $0x96, Z11, Z5, Z1 // Combine results with xor into Z1 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + VEXTRACTF32X4 $1, Z1, X2 // X2: Second 128-bit lane + VEXTRACTF32X4 $2, Z1, X3 // X3: Third 128-bit lane + VEXTRACTF32X4 $3, Z1, X4 // X4: Fourth 128-bit lane + + MOVOA r4r3<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. + PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupoly<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + PEXTRD $1, X1, AX + MOVL AX, ret+32(FP) + VZEROUPPER + RET + +// Castagonli Polynomial constants +DATA r2r1C<>+0(SB)/8, $0x0740eef02 +DATA r2r1C<>+8(SB)/8, $0x09e4addf8 +DATA r2r1C<>+16(SB)/8, $0x0740eef02 +DATA r2r1C<>+24(SB)/8, $0x09e4addf8 +DATA r2r1C<>+32(SB)/8, $0x0740eef02 +DATA r2r1C<>+40(SB)/8, $0x09e4addf8 +DATA r2r1C<>+48(SB)/8, $0x0740eef02 +DATA r2r1C<>+56(SB)/8, $0x09e4addf8 +GLOBL r2r1C<>(SB), RODATA, $64 + +DATA r4r3C<>+0(SB)/8, $0xf20c0dfe +DATA r4r3C<>+8(SB)/8, $0x14cd00bd6 +DATA rupolyC<>+0(SB)/8, $0x105ec76f0 +DATA rupolyC<>+8(SB)/8, $0xdea713f1 +DATA r5C<>+0(SB)/8, $0xdd45aab8 + +GLOBL r4r3C<>(SB), RODATA, $16 +GLOBL rupolyC<>(SB), RODATA, $16 +GLOBL r5C<>(SB), RODATA, $8 + +// Based on https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 128, and must be a multiple of 16. + +// func castagnoliCLMULAvx512(crc uint32, p []byte) uint32 +TEXT ·castagnoliCLMULAvx512(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + VPXORQ Z0, Z0, Z0 + VMOVDQU64 (SI), Z1 + VMOVQ AX, X0 + VPXORQ Z0, Z1, Z1 // Merge initial CRC value into Z1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + + VMOVDQU64 r2r1C<>+0(SB), Z0 + +loopback64: + // Load next early + VMOVDQU64 (SI), Z11 + + VPCLMULQDQ $0x11, Z0, Z1, Z5 + VPCLMULQDQ $0, Z0, Z1, Z1 + + VPTERNLOGD $0x96, Z11, Z5, Z1 // Combine results with xor into Z1 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + VEXTRACTF32X4 $1, Z1, X2 // X2: Second 128-bit lane + VEXTRACTF32X4 $2, Z1, X3 // X3: Third 128-bit lane + VEXTRACTF32X4 $3, Z1, X4 // X4: Fourth 128-bit lane + + MOVOA r4r3C<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5C<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. + PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupolyC<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + PEXTRD $1, X1, AX + MOVL AX, ret+32(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_arm64.go b/vendor/github.com/klauspost/crc32/crc32_arm64.go new file mode 100644 index 000000000000..7e9ac5539867 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_arm64.go @@ -0,0 +1,50 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ARM64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import "golang.org/x/sys/cpu" + +func castagnoliUpdate(crc uint32, p []byte) uint32 +func ieeeUpdate(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return cpu.ARM64.HasCRC32 +} + +func archInitCastagnoli() { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } + + return ^castagnoliUpdate(^crc, p) +} + +func archAvailableIEEE() bool { + return cpu.ARM64.HasCRC32 +} + +func archInitIEEE() { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !cpu.ARM64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } + + return ^ieeeUpdate(^crc, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_arm64.s b/vendor/github.com/klauspost/crc32/crc32_arm64.s new file mode 100644 index 000000000000..e82778f7bd61 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_arm64.s @@ -0,0 +1,97 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// castagnoliUpdate updates the non-inverted crc with the given data. + +// func castagnoliUpdate(crc uint32, p []byte) uint32 +TEXT ·castagnoliUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R9 // CRC value + MOVD p+8(FP), R13 // data pointer + MOVD p_len+16(FP), R11 // len(p) + +update: + CMP $16, R11 + BLT less_than_16 + LDP.P 16(R13), (R8, R10) + CRC32CX R8, R9 + CRC32CX R10, R9 + SUB $16, R11 + + JMP update + +less_than_16: + TBZ $3, R11, less_than_8 + + MOVD.P 8(R13), R10 + CRC32CX R10, R9 + +less_than_8: + TBZ $2, R11, less_than_4 + + MOVWU.P 4(R13), R10 + CRC32CW R10, R9 + +less_than_4: + TBZ $1, R11, less_than_2 + + MOVHU.P 2(R13), R10 + CRC32CH R10, R9 + +less_than_2: + TBZ $0, R11, done + + MOVBU (R13), R10 + CRC32CB R10, R9 + +done: + MOVWU R9, ret+32(FP) + RET + +// ieeeUpdate updates the non-inverted crc with the given data. + +// func ieeeUpdate(crc uint32, p []byte) uint32 +TEXT ·ieeeUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R9 // CRC value + MOVD p+8(FP), R13 // data pointer + MOVD p_len+16(FP), R11 // len(p) + +update: + CMP $16, R11 + BLT less_than_16 + LDP.P 16(R13), (R8, R10) + CRC32X R8, R9 + CRC32X R10, R9 + SUB $16, R11 + + JMP update + +less_than_16: + TBZ $3, R11, less_than_8 + + MOVD.P 8(R13), R10 + CRC32X R10, R9 + +less_than_8: + TBZ $2, R11, less_than_4 + + MOVWU.P 4(R13), R10 + CRC32W R10, R9 + +less_than_4: + TBZ $1, R11, less_than_2 + + MOVHU.P 2(R13), R10 + CRC32H R10, R9 + +less_than_2: + TBZ $0, R11, done + + MOVBU (R13), R10 + CRC32B R10, R9 + +done: + MOVWU R9, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go new file mode 100644 index 000000000000..d1cf69cf4627 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_generic.go @@ -0,0 +1,91 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains CRC32 algorithms that are not specific to any architecture +// and don't use hardware acceleration. +// +// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. +// +// The slicing-by-8 algorithm is a faster implementation that uses a bigger +// table (8*256*4 bytes). + +package crc32 + +import "encoding/binary" + +// simpleMakeTable allocates and constructs a Table for the specified +// polynomial. The table is suitable for use with the simple algorithm +// (simpleUpdate). +func simpleMakeTable(poly uint32) *Table { + t := new(Table) + simplePopulateTable(poly, t) + return t +} + +// simplePopulateTable constructs a Table for the specified polynomial, suitable +// for use with simpleUpdate. +func simplePopulateTable(poly uint32, t *Table) { + for i := 0; i < 256; i++ { + crc := uint32(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } +} + +// simpleUpdate uses the simple algorithm to update the CRC, given a table that +// was previously computed using simpleMakeTable. +func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { + crc = ^crc + for _, v := range p { + crc = tab[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// Use slicing-by-8 when payload >= this value. +const slicing8Cutoff = 16 + +// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. +type slicing8Table [8]Table + +// slicingMakeTable constructs a slicing8Table for the specified polynomial. The +// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). +func slicingMakeTable(poly uint32) *slicing8Table { + t := new(slicing8Table) + simplePopulateTable(poly, &t[0]) + for i := 0; i < 256; i++ { + crc := t[0][i] + for j := 1; j < 8; j++ { + crc = t[0][crc&0xFF] ^ (crc >> 8) + t[j][i] = crc + } + } + return t +} + +// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a +// table that was previously computed using slicingMakeTable. +func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { + if len(p) >= slicing8Cutoff { + crc = ^crc + for len(p) > 8 { + crc ^= binary.LittleEndian.Uint32(p) + crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ + tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ + tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] + p = p[8:] + } + crc = ^crc + } + if len(p) == 0 { + return crc + } + return simpleUpdate(crc, &tab[0], p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_loong64.go b/vendor/github.com/klauspost/crc32/crc32_loong64.go new file mode 100644 index 000000000000..3e0fd9778d5d --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_loong64.go @@ -0,0 +1,50 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// LoongArch64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import "golang.org/x/sys/cpu" + +func castagnoliUpdate(crc uint32, p []byte) uint32 +func ieeeUpdate(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return cpu.Loong64.HasCRC32 +} + +func archInitCastagnoli() { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for Castagnoli not available") + } + + return ^castagnoliUpdate(^crc, p) +} + +func archAvailableIEEE() bool { + return cpu.Loong64.HasCRC32 +} + +func archInitIEEE() { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !cpu.Loong64.HasCRC32 { + panic("arch-specific crc32 instruction for IEEE not available") + } + + return ^ieeeUpdate(^crc, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_loong64.s b/vendor/github.com/klauspost/crc32/crc32_loong64.s new file mode 100644 index 000000000000..7165714dcad7 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_loong64.s @@ -0,0 +1,160 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// castagnoliUpdate updates the non-inverted crc with the given data. + +// func castagnoliUpdate(crc uint32, p []byte) uint32 +TEXT ·castagnoliUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R4 // a0 = CRC value + MOVV p+8(FP), R5 // a1 = data pointer + MOVV p_len+16(FP), R6 // a2 = len(p) + + SGT $8, R6, R12 + BNE R12, less_than_8 + AND $7, R5, R12 + BEQ R12, aligned + + // Process the first few bytes to 8-byte align the input. + // t0 = 8 - t0. We need to process this many bytes to align. + SUB $1, R12 + XOR $7, R12 + + AND $1, R12, R13 + BEQ R13, align_2 + MOVB (R5), R13 + CRCCWBW R4, R13, R4 + ADDV $1, R5 + ADDV $-1, R6 + +align_2: + AND $2, R12, R13 + BEQ R13, align_4 + MOVH (R5), R13 + CRCCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +align_4: + AND $4, R12, R13 + BEQ R13, aligned + MOVW (R5), R13 + CRCCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + SGT $8, R6, R12 + BNE R12, less_than_8 + MOVV (R5), R13 + CRCCWVW R4, R13, R4 + ADDV $8, R5 + ADDV $-8, R6 + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + AND $4, R6, R12 + BEQ R12, less_than_4 + MOVW (R5), R13 + CRCCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +less_than_4: + AND $2, R6, R12 + BEQ R12, less_than_2 + MOVH (R5), R13 + CRCCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +less_than_2: + BEQ R6, done + MOVB (R5), R13 + CRCCWBW R4, R13, R4 + +done: + MOVW R4, ret+32(FP) + RET + +// ieeeUpdate updates the non-inverted crc with the given data. + +// func ieeeUpdate(crc uint32, p []byte) uint32 +TEXT ·ieeeUpdate(SB), NOSPLIT, $0-36 + MOVWU crc+0(FP), R4 // a0 = CRC value + MOVV p+8(FP), R5 // a1 = data pointer + MOVV p_len+16(FP), R6 // a2 = len(p) + + SGT $8, R6, R12 + BNE R12, less_than_8 + AND $7, R5, R12 + BEQ R12, aligned + + // Process the first few bytes to 8-byte align the input. + // t0 = 8 - t0. We need to process this many bytes to align. + SUB $1, R12 + XOR $7, R12 + + AND $1, R12, R13 + BEQ R13, align_2 + MOVB (R5), R13 + CRCWBW R4, R13, R4 + ADDV $1, R5 + ADDV $-1, R6 + +align_2: + AND $2, R12, R13 + BEQ R13, align_4 + MOVH (R5), R13 + CRCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +align_4: + AND $4, R12, R13 + BEQ R13, aligned + MOVW (R5), R13 + CRCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + SGT $8, R6, R12 + BNE R12, less_than_8 + MOVV (R5), R13 + CRCWVW R4, R13, R4 + ADDV $8, R5 + ADDV $-8, R6 + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + AND $4, R6, R12 + BEQ R12, less_than_4 + MOVW (R5), R13 + CRCWWW R4, R13, R4 + ADDV $4, R5 + ADDV $-4, R6 + +less_than_4: + AND $2, R6, R12 + BEQ R12, less_than_2 + MOVH (R5), R13 + CRCWHW R4, R13, R4 + ADDV $2, R5 + ADDV $-2, R6 + +less_than_2: + BEQ R6, done + MOVB (R5), R13 + CRCWBW R4, R13, R4 + +done: + MOVW R4, ret+32(FP) + RET + diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go new file mode 100644 index 000000000000..f900968ad3d0 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_otherarch.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 && !s390x && !ppc64le && !arm64 && !loong64 + +package crc32 + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } + +func archAvailableCastagnoli() bool { return false } +func archInitCastagnoli() { panic("not available") } +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_ppc64le.go b/vendor/github.com/klauspost/crc32/crc32_ppc64le.go new file mode 100644 index 000000000000..c22e38e00947 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_ppc64le.go @@ -0,0 +1,88 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32 + +import ( + "unsafe" +) + +const ( + vecMinLen = 16 + vecAlignMask = 15 // align to 16 bytes + crcIEEE = 1 + crcCast = 2 +) + +//go:noescape +func ppc64SlicingUpdateBy8(crc uint32, table8 *slicing8Table, p []byte) uint32 + +// this function requires the buffer to be 16 byte aligned and > 16 bytes long. +// +//go:noescape +func vectorCrc32(crc uint32, poly uint32, p []byte) uint32 + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if len(p) >= 4*vecMinLen { + // If not aligned then process the initial unaligned bytes + + if uint64(uintptr(unsafe.Pointer(&p[0])))&uint64(vecAlignMask) != 0 { + align := uint64(uintptr(unsafe.Pointer(&p[0]))) & uint64(vecAlignMask) + newlen := vecMinLen - align + crc = ppc64SlicingUpdateBy8(crc, archCastagnoliTable8, p[:newlen]) + p = p[newlen:] + } + // p should be aligned now + aligned := len(p) & ^vecAlignMask + crc = vectorCrc32(crc, crcCast, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return ppc64SlicingUpdateBy8(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return true +} +func archAvailableCastagnoli() bool { + return true +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + + // Check if vector code should be used. If not aligned, then handle those + // first up to the aligned bytes. + + if len(p) >= 4*vecMinLen { + if uint64(uintptr(unsafe.Pointer(&p[0])))&uint64(vecAlignMask) != 0 { + align := uint64(uintptr(unsafe.Pointer(&p[0]))) & uint64(vecAlignMask) + newlen := vecMinLen - align + crc = ppc64SlicingUpdateBy8(crc, archIeeeTable8, p[:newlen]) + p = p[newlen:] + } + aligned := len(p) & ^vecAlignMask + crc = vectorCrc32(crc, crcIEEE, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return ppc64SlicingUpdateBy8(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_ppc64le.s b/vendor/github.com/klauspost/crc32/crc32_ppc64le.s new file mode 100644 index 000000000000..87edef7053f9 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_ppc64le.s @@ -0,0 +1,736 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The vectorized implementation found below is a derived work +// from code written by Anton Blanchard found +// at https://github.com/antonblanchard/crc32-vpmsum. The original +// is dual licensed under GPL and Apache 2. As the copyright holder +// for the work, IBM has contributed this new work under +// the golang license. + +// Changes include porting to Go assembler with modifications for +// the Go ABI for ppc64le. + +#include "textflag.h" + +#define POWER8_OFFSET 132 + +#define off16 R16 +#define off32 R17 +#define off48 R18 +#define off64 R19 +#define off80 R20 +#define off96 R21 +#define off112 R22 + +#define const1 V24 +#define const2 V25 + +#define byteswap V26 +#define mask_32bit V27 +#define mask_64bit V28 +#define zeroes V29 + +#define MAX_SIZE 32*1024 +#define REFLECT + +TEXT ·ppc64SlicingUpdateBy8(SB), NOSPLIT|NOFRAME, $0-44 + MOVWZ crc+0(FP), R3 // incoming crc + MOVD table8+8(FP), R4 // *Table + MOVD p+16(FP), R5 + MOVD p_len+24(FP), R6 // p len + + CMP $0, R6 // len == 0? + BNE start + MOVW R3, ret+40(FP) // return crc + RET + +start: + NOR R3, R3, R7 // ^crc + MOVWZ R7, R7 // 32 bits + CMP R6, $16 + MOVD R6, CTR + BLT short + SRAD $3, R6, R8 // 8 byte chunks + MOVD R8, CTR + +loop: + MOVWZ 0(R5), R8 // 0-3 bytes of p ?Endian? + MOVWZ 4(R5), R9 // 4-7 bytes of p + MOVD R4, R10 // &tab[0] + XOR R7, R8, R7 // crc ^= byte[0:3] + RLDICL $40, R9, $56, R17 // p[7] + SLD $2, R17, R17 // p[7]*4 + RLDICL $40, R7, $56, R8 // crc>>24 + SLD $2, R8, R8 // crc>>24*4 + RLDICL $48, R9, $56, R18 // p[6] + SLD $2, R18, R18 // p[6]*4 + MOVWZ (R10)(R17), R21 // tab[0][p[7]] + ADD $1024, R10, R10 // tab[1] + RLDICL $56, R9, $56, R19 // p[5] + SLD $2, R19, R19 // p[5]*4:1 + MOVWZ (R10)(R18), R22 // tab[1][p[6]] + ADD $1024, R10, R10 // tab[2] + XOR R21, R22, R21 // xor done R22 + CLRLSLDI $56, R9, $2, R20 + MOVWZ (R10)(R19), R23 // tab[2][p[5]] + ADD $1024, R10, R10 // &tab[3] + XOR R21, R23, R21 // xor done R23 + MOVWZ (R10)(R20), R24 // tab[3][p[4]] + ADD $1024, R10, R10 // &tab[4] + XOR R21, R24, R21 // xor done R24 + MOVWZ (R10)(R8), R25 // tab[4][crc>>24] + RLDICL $48, R7, $56, R24 // crc>>16&0xFF + XOR R21, R25, R21 // xor done R25 + ADD $1024, R10, R10 // &tab[5] + SLD $2, R24, R24 // crc>>16&0xFF*4 + MOVWZ (R10)(R24), R26 // tab[5][crc>>16&0xFF] + XOR R21, R26, R21 // xor done R26 + RLDICL $56, R7, $56, R25 // crc>>8 + ADD $1024, R10, R10 // &tab[6] + SLD $2, R25, R25 // crc>>8&FF*2 + MOVBZ R7, R26 // crc&0xFF + MOVWZ (R10)(R25), R27 // tab[6][crc>>8&0xFF] + ADD $1024, R10, R10 // &tab[7] + SLD $2, R26, R26 // crc&0xFF*2 + XOR R21, R27, R21 // xor done R27 + ADD $8, R5 // p = p[8:] + MOVWZ (R10)(R26), R28 // tab[7][crc&0xFF] + XOR R21, R28, R21 // xor done R28 + MOVWZ R21, R7 // crc for next round + BDNZ loop + ANDCC $7, R6, R8 // any leftover bytes + BEQ done // none --> done + MOVD R8, CTR // byte count + PCALIGN $16 // align short loop + +short: + MOVBZ 0(R5), R8 // get v + XOR R8, R7, R8 // byte(crc)^v -> R8 + RLDIC $2, R8, $54, R8 // rldicl r8,r8,2,22 + SRD $8, R7, R14 // crc>>8 + MOVWZ (R4)(R8), R10 + ADD $1, R5 + XOR R10, R14, R7 // loop crc in R7 + BDNZ short + +done: + NOR R7, R7, R7 // ^crc + MOVW R7, ret+40(FP) // return crc + RET + +#ifdef BYTESWAP_DATA +DATA ·byteswapcons+0(SB)/8, $0x0706050403020100 +DATA ·byteswapcons+8(SB)/8, $0x0f0e0d0c0b0a0908 + +GLOBL ·byteswapcons+0(SB), RODATA, $16 +#endif + +TEXT ·vectorCrc32(SB), NOSPLIT|NOFRAME, $0-36 + MOVWZ crc+0(FP), R3 // incoming crc + MOVWZ ctab+4(FP), R14 // crc poly id + MOVD p+8(FP), R4 + MOVD p_len+16(FP), R5 // p len + + // R3 = incoming crc + // R14 = constant table identifier + // R5 = address of bytes + // R6 = length of bytes + + // defines for index loads + + MOVD $16, off16 + MOVD $32, off32 + MOVD $48, off48 + MOVD $64, off64 + MOVD $80, off80 + MOVD $96, off96 + MOVD $112, off112 + MOVD $0, R15 + + MOVD R3, R10 // save initial crc + + NOR R3, R3, R3 // ^crc + MOVWZ R3, R3 // 32 bits + VXOR zeroes, zeroes, zeroes // clear the V reg + VSPLTISW $-1, V0 + VSLDOI $4, V29, V0, mask_32bit + VSLDOI $8, V29, V0, mask_64bit + + VXOR V8, V8, V8 + MTVSRD R3, VS40 // crc initial value VS40 = V8 + +#ifdef REFLECT + VSLDOI $8, zeroes, V8, V8 // or: VSLDOI V29,V8,V27,4 for top 32 bits? + +#else + VSLDOI $4, V8, zeroes, V8 + +#endif + +#ifdef BYTESWAP_DATA + MOVD $·byteswapcons(SB), R3 + LVX (R3), byteswap + +#endif + + CMPU R5, $256 // length of bytes + BLT short + + RLDICR $0, R5, $56, R6 // chunk to process + + // First step for larger sizes +l1: + MOVD $32768, R7 + MOVD R7, R9 + CMP R6, R7 // compare R6, R7 (MAX SIZE) + BGT top // less than MAX, just do remainder + MOVD R6, R7 + +top: + SUB R7, R6, R6 + + // mainloop does 128 bytes at a time + SRD $7, R7 + + // determine the offset into the constants table to start with. + // Each constant is 128 bytes, used against 16 bytes of data. + SLD $4, R7, R8 + SRD $3, R9, R9 + SUB R8, R9, R8 + + // The last iteration is reduced in a separate step + ADD $-1, R7 + MOVD R7, CTR + + // Determine which constant table (depends on poly) + CMP R14, $1 + BNE castTable + MOVD $·IEEEConst(SB), R3 + BR startConst + +castTable: + MOVD $·CastConst(SB), R3 + +startConst: + ADD R3, R8, R3 // starting point in constants table + + VXOR V0, V0, V0 // clear the V regs + VXOR V1, V1, V1 + VXOR V2, V2, V2 + VXOR V3, V3, V3 + VXOR V4, V4, V4 + VXOR V5, V5, V5 + VXOR V6, V6, V6 + VXOR V7, V7, V7 + + LVX (R3), const1 // loading constant values + + CMP R15, $1 // Identify warm up pass + BEQ next + + // First warm up pass: load the bytes to process + LVX (R4), V16 + LVX (R4+off16), V17 + LVX (R4+off32), V18 + LVX (R4+off48), V19 + LVX (R4+off64), V20 + LVX (R4+off80), V21 + LVX (R4+off96), V22 + LVX (R4+off112), V23 + ADD $128, R4 // bump up to next 128 bytes in buffer + + VXOR V16, V8, V16 // xor in initial CRC in V8 + +next: + BC 18, 0, first_warm_up_done + + ADD $16, R3 // bump up to next constants + LVX (R3), const2 // table values + + VPMSUMD V16, const1, V8 // second warm up pass + LVX (R4), V16 // load from buffer + OR $0, R2, R2 + + VPMSUMD V17, const1, V9 // vpmsumd with constants + LVX (R4+off16), V17 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V18, const1, V10 // vpmsumd with constants + LVX (R4+off32), V18 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V19, const1, V11 // vpmsumd with constants + LVX (R4+off48), V19 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V20, const1, V12 // vpmsumd with constants + LVX (R4+off64), V20 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V21, const1, V13 // vpmsumd with constants + LVX (R4+off80), V21 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V22, const1, V14 // vpmsumd with constants + LVX (R4+off96), V22 // load next from buffer + OR $0, R2, R2 + + VPMSUMD V23, const1, V15 // vpmsumd with constants + LVX (R4+off112), V23 // load next from buffer + + ADD $128, R4 // bump up to next 128 bytes in buffer + + BC 18, 0, first_cool_down + +cool_top: + LVX (R3), const1 // constants + ADD $16, R3 // inc to next constants + OR $0, R2, R2 + + VXOR V0, V8, V0 // xor in previous vpmsumd + VPMSUMD V16, const2, V8 // vpmsumd with constants + LVX (R4), V16 // buffer + OR $0, R2, R2 + + VXOR V1, V9, V1 // xor in previous + VPMSUMD V17, const2, V9 // vpmsumd with constants + LVX (R4+off16), V17 // next in buffer + OR $0, R2, R2 + + VXOR V2, V10, V2 // xor in previous + VPMSUMD V18, const2, V10 // vpmsumd with constants + LVX (R4+off32), V18 // next in buffer + OR $0, R2, R2 + + VXOR V3, V11, V3 // xor in previous + VPMSUMD V19, const2, V11 // vpmsumd with constants + LVX (R4+off48), V19 // next in buffer + LVX (R3), const2 // get next constant + OR $0, R2, R2 + + VXOR V4, V12, V4 // xor in previous + VPMSUMD V20, const1, V12 // vpmsumd with constants + LVX (R4+off64), V20 // next in buffer + OR $0, R2, R2 + + VXOR V5, V13, V5 // xor in previous + VPMSUMD V21, const1, V13 // vpmsumd with constants + LVX (R4+off80), V21 // next in buffer + OR $0, R2, R2 + + VXOR V6, V14, V6 // xor in previous + VPMSUMD V22, const1, V14 // vpmsumd with constants + LVX (R4+off96), V22 // next in buffer + OR $0, R2, R2 + + VXOR V7, V15, V7 // xor in previous + VPMSUMD V23, const1, V15 // vpmsumd with constants + LVX (R4+off112), V23 // next in buffer + + ADD $128, R4 // bump up buffer pointer + BDNZ cool_top // are we done? + +first_cool_down: + + // load the constants + // xor in the previous value + // vpmsumd the result with constants + + LVX (R3), const1 + ADD $16, R3 + + VXOR V0, V8, V0 + VPMSUMD V16, const1, V8 + OR $0, R2, R2 + + VXOR V1, V9, V1 + VPMSUMD V17, const1, V9 + OR $0, R2, R2 + + VXOR V2, V10, V2 + VPMSUMD V18, const1, V10 + OR $0, R2, R2 + + VXOR V3, V11, V3 + VPMSUMD V19, const1, V11 + OR $0, R2, R2 + + VXOR V4, V12, V4 + VPMSUMD V20, const1, V12 + OR $0, R2, R2 + + VXOR V5, V13, V5 + VPMSUMD V21, const1, V13 + OR $0, R2, R2 + + VXOR V6, V14, V6 + VPMSUMD V22, const1, V14 + OR $0, R2, R2 + + VXOR V7, V15, V7 + VPMSUMD V23, const1, V15 + OR $0, R2, R2 + +second_cool_down: + + VXOR V0, V8, V0 + VXOR V1, V9, V1 + VXOR V2, V10, V2 + VXOR V3, V11, V3 + VXOR V4, V12, V4 + VXOR V5, V13, V5 + VXOR V6, V14, V6 + VXOR V7, V15, V7 + +#ifdef REFLECT + VSLDOI $4, V0, zeroes, V0 + VSLDOI $4, V1, zeroes, V1 + VSLDOI $4, V2, zeroes, V2 + VSLDOI $4, V3, zeroes, V3 + VSLDOI $4, V4, zeroes, V4 + VSLDOI $4, V5, zeroes, V5 + VSLDOI $4, V6, zeroes, V6 + VSLDOI $4, V7, zeroes, V7 + +#endif + + LVX (R4), V8 + LVX (R4+off16), V9 + LVX (R4+off32), V10 + LVX (R4+off48), V11 + LVX (R4+off64), V12 + LVX (R4+off80), V13 + LVX (R4+off96), V14 + LVX (R4+off112), V15 + + ADD $128, R4 + + VXOR V0, V8, V16 + VXOR V1, V9, V17 + VXOR V2, V10, V18 + VXOR V3, V11, V19 + VXOR V4, V12, V20 + VXOR V5, V13, V21 + VXOR V6, V14, V22 + VXOR V7, V15, V23 + + MOVD $1, R15 + CMP $0, R6 + ADD $128, R6 + + BNE l1 + ANDCC $127, R5 + SUBC R5, $128, R6 + ADD R3, R6, R3 + + SRD $4, R5, R7 + MOVD R7, CTR + LVX (R3), V0 + LVX (R3+off16), V1 + LVX (R3+off32), V2 + LVX (R3+off48), V3 + LVX (R3+off64), V4 + LVX (R3+off80), V5 + LVX (R3+off96), V6 + LVX (R3+off112), V7 + + ADD $128, R3 + + VPMSUMW V16, V0, V0 + VPMSUMW V17, V1, V1 + VPMSUMW V18, V2, V2 + VPMSUMW V19, V3, V3 + VPMSUMW V20, V4, V4 + VPMSUMW V21, V5, V5 + VPMSUMW V22, V6, V6 + VPMSUMW V23, V7, V7 + + // now reduce the tail + + CMP $0, R7 + BEQ next1 + + LVX (R4), V16 + LVX (R3), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off16), V16 + LVX (R3+off16), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off32), V16 + LVX (R3+off32), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off48), V16 + LVX (R3+off48), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off64), V16 + LVX (R3+off64), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off80), V16 + LVX (R3+off80), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + BC 18, 0, next1 + + LVX (R4+off96), V16 + LVX (R3+off96), V17 + VPMSUMW V16, V17, V16 + VXOR V0, V16, V0 + +next1: + VXOR V0, V1, V0 + VXOR V2, V3, V2 + VXOR V4, V5, V4 + VXOR V6, V7, V6 + VXOR V0, V2, V0 + VXOR V4, V6, V4 + VXOR V0, V4, V0 + +barrett_reduction: + + CMP R14, $1 + BNE barcstTable + MOVD $·IEEEBarConst(SB), R3 + BR startbarConst + +barcstTable: + MOVD $·CastBarConst(SB), R3 + +startbarConst: + LVX (R3), const1 + LVX (R3+off16), const2 + + VSLDOI $8, V0, V0, V1 + VXOR V0, V1, V0 + +#ifdef REFLECT + VSPLTISB $1, V1 + VSL V0, V1, V0 + +#endif + + VAND V0, mask_64bit, V0 + +#ifndef REFLECT + + VPMSUMD V0, const1, V1 + VSLDOI $8, zeroes, V1, V1 + VPMSUMD V1, const2, V1 + VXOR V0, V1, V0 + VSLDOI $8, V0, zeroes, V0 + +#else + + VAND V0, mask_32bit, V1 + VPMSUMD V1, const1, V1 + VAND V1, mask_32bit, V1 + VPMSUMD V1, const2, V1 + VXOR V0, V1, V0 + VSLDOI $4, V0, zeroes, V0 + +#endif + + MFVSRD VS32, R3 // VS32 = V0 + + NOR R3, R3, R3 // return ^crc + MOVW R3, ret+32(FP) + RET + +first_warm_up_done: + + LVX (R3), const1 + ADD $16, R3 + + VPMSUMD V16, const1, V8 + VPMSUMD V17, const1, V9 + VPMSUMD V18, const1, V10 + VPMSUMD V19, const1, V11 + VPMSUMD V20, const1, V12 + VPMSUMD V21, const1, V13 + VPMSUMD V22, const1, V14 + VPMSUMD V23, const1, V15 + + BR second_cool_down + +short: + CMP $0, R5 + BEQ zero + + // compute short constants + + CMP R14, $1 + BNE castshTable + MOVD $·IEEEConst(SB), R3 + ADD $4080, R3 + BR startshConst + +castshTable: + MOVD $·CastConst(SB), R3 + ADD $4080, R3 + +startshConst: + SUBC R5, $256, R6 // sub from 256 + ADD R3, R6, R3 + + // calculate where to start + + SRD $4, R5, R7 + MOVD R7, CTR + + VXOR V19, V19, V19 + VXOR V20, V20, V20 + + LVX (R4), V0 + LVX (R3), V16 + VXOR V0, V8, V0 + VPMSUMW V0, V16, V0 + BC 18, 0, v0 + + LVX (R4+off16), V1 + LVX (R3+off16), V17 + VPMSUMW V1, V17, V1 + BC 18, 0, v1 + + LVX (R4+off32), V2 + LVX (R3+off32), V16 + VPMSUMW V2, V16, V2 + BC 18, 0, v2 + + LVX (R4+off48), V3 + LVX (R3+off48), V17 + VPMSUMW V3, V17, V3 + BC 18, 0, v3 + + LVX (R4+off64), V4 + LVX (R3+off64), V16 + VPMSUMW V4, V16, V4 + BC 18, 0, v4 + + LVX (R4+off80), V5 + LVX (R3+off80), V17 + VPMSUMW V5, V17, V5 + BC 18, 0, v5 + + LVX (R4+off96), V6 + LVX (R3+off96), V16 + VPMSUMW V6, V16, V6 + BC 18, 0, v6 + + LVX (R4+off112), V7 + LVX (R3+off112), V17 + VPMSUMW V7, V17, V7 + BC 18, 0, v7 + + ADD $128, R3 + ADD $128, R4 + + LVX (R4), V8 + LVX (R3), V16 + VPMSUMW V8, V16, V8 + BC 18, 0, v8 + + LVX (R4+off16), V9 + LVX (R3+off16), V17 + VPMSUMW V9, V17, V9 + BC 18, 0, v9 + + LVX (R4+off32), V10 + LVX (R3+off32), V16 + VPMSUMW V10, V16, V10 + BC 18, 0, v10 + + LVX (R4+off48), V11 + LVX (R3+off48), V17 + VPMSUMW V11, V17, V11 + BC 18, 0, v11 + + LVX (R4+off64), V12 + LVX (R3+off64), V16 + VPMSUMW V12, V16, V12 + BC 18, 0, v12 + + LVX (R4+off80), V13 + LVX (R3+off80), V17 + VPMSUMW V13, V17, V13 + BC 18, 0, v13 + + LVX (R4+off96), V14 + LVX (R3+off96), V16 + VPMSUMW V14, V16, V14 + BC 18, 0, v14 + + LVX (R4+off112), V15 + LVX (R3+off112), V17 + VPMSUMW V15, V17, V15 + + VXOR V19, V15, V19 + +v14: + VXOR V20, V14, V20 + +v13: + VXOR V19, V13, V19 + +v12: + VXOR V20, V12, V20 + +v11: + VXOR V19, V11, V19 + +v10: + VXOR V20, V10, V20 + +v9: + VXOR V19, V9, V19 + +v8: + VXOR V20, V8, V20 + +v7: + VXOR V19, V7, V19 + +v6: + VXOR V20, V6, V20 + +v5: + VXOR V19, V5, V19 + +v4: + VXOR V20, V4, V20 + +v3: + VXOR V19, V3, V19 + +v2: + VXOR V20, V2, V20 + +v1: + VXOR V19, V1, V19 + +v0: + VXOR V20, V0, V20 + + VXOR V19, V20, V0 + + BR barrett_reduction + +zero: + // This case is the original crc, so just return it + MOVW R10, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go new file mode 100644 index 000000000000..67b4ea7d989f --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32 + +import "golang.org/x/sys/cpu" + +const ( + vxMinLen = 64 + vxAlignMask = 15 // align to 16 bytes +) + +// hasVX reports whether the machine has the z/Architecture +// vector facility installed and enabled. +var hasVX = cpu.S390X.HasVX + +// vectorizedCastagnoli implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +// +//go:noescape +func vectorizedCastagnoli(crc uint32, p []byte) uint32 + +// vectorizedIEEE implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +// +//go:noescape +func vectorizedIEEE(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return hasVX +} + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +// archUpdateCastagnoli calculates the checksum of p using +// vectorizedCastagnoli. +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedCastagnoli(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return hasVX +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedIEEE(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s new file mode 100644 index 000000000000..aefda50e1f5a --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.s @@ -0,0 +1,225 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// Vector register range containing CRC-32 constants + +#define CONST_PERM_LE2BE V9 +#define CONST_R2R1 V10 +#define CONST_R4R3 V11 +#define CONST_R5 V12 +#define CONST_RU_POLY V13 +#define CONST_CRC_POLY V14 + +// The CRC-32 constant block contains reduction constants to fold and +// process particular chunks of the input data stream in parallel. +// +// Note that the constant definitions below are extended in order to compute +// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. +// The rightmost doubleword can be 0 to prevent contribution to the result or +// can be multiplied by 1 to perform an XOR without the need for a separate +// VECTOR EXCLUSIVE OR instruction. +// +// The polynomials used are bit-reflected: +// +// IEEE: P'(x) = 0x0edb88320 +// Castagnoli: P'(x) = 0x082f63b78 + +// IEEE polynomial constants +DATA ·crclecons+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crclecons+8(SB)/8, $0x0706050403020100 +DATA ·crclecons+16(SB)/8, $0x00000001c6e41596 // R2 +DATA ·crclecons+24(SB)/8, $0x0000000154442bd4 // R1 +DATA ·crclecons+32(SB)/8, $0x00000000ccaa009e // R4 +DATA ·crclecons+40(SB)/8, $0x00000001751997d0 // R3 +DATA ·crclecons+48(SB)/8, $0x0000000000000000 +DATA ·crclecons+56(SB)/8, $0x0000000163cd6124 // R5 +DATA ·crclecons+64(SB)/8, $0x0000000000000000 +DATA ·crclecons+72(SB)/8, $0x00000001F7011641 // u' +DATA ·crclecons+80(SB)/8, $0x0000000000000000 +DATA ·crclecons+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 + +GLOBL ·crclecons(SB), RODATA, $144 + +// Castagonli Polynomial constants +DATA ·crcclecons+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crcclecons+8(SB)/8, $0x0706050403020100 +DATA ·crcclecons+16(SB)/8, $0x000000009e4addf8 // R2 +DATA ·crcclecons+24(SB)/8, $0x00000000740eef02 // R1 +DATA ·crcclecons+32(SB)/8, $0x000000014cd00bd6 // R4 +DATA ·crcclecons+40(SB)/8, $0x00000000f20c0dfe // R3 +DATA ·crcclecons+48(SB)/8, $0x0000000000000000 +DATA ·crcclecons+56(SB)/8, $0x00000000dd45aab8 // R5 +DATA ·crcclecons+64(SB)/8, $0x0000000000000000 +DATA ·crcclecons+72(SB)/8, $0x00000000dea713f1 // u' +DATA ·crcclecons+80(SB)/8, $0x0000000000000000 +DATA ·crcclecons+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 + +GLOBL ·crcclecons(SB), RODATA, $144 + +// The CRC-32 function(s) use these calling conventions: +// +// Parameters: +// +// R2: Initial CRC value, typically ~0; and final CRC (return) value. +// R3: Input buffer pointer, performance might be improved if the +// buffer is on a doubleword boundary. +// R4: Length of the buffer, must be 64 bytes or greater. +// +// Register usage: +// +// R5: CRC-32 constant pool base pointer. +// V0: Initial CRC value and intermediate constants and results. +// V1..V4: Data for CRC computation. +// V5..V8: Next data chunks that are fetched from the input buffer. +// +// V9..V14: CRC-32 constants. + +// func vectorizedIEEE(crc uint32, p []byte) uint32 +TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + MOVD $·crclecons(SB), R5 + BR vectorizedBody<>(SB) + +// func vectorizedCastagnoli(crc uint32, p []byte) uint32 +TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + // R5: crc-32 constant pool base pointer, constant is used to reduce crc + MOVD $·crcclecons(SB), R5 + BR vectorizedBody<>(SB) + +TEXT vectorizedBody<>(SB), NOSPLIT, $0 + XOR $0xffffffff, R2 // NOTW R2 + VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY + + // Load the initial CRC value into the rightmost word of V0 + VZERO V0 + VLVGF $3, R2, V0 + + // Crash if the input size is less than 64-bytes. + CMP R4, $64 + BLT crash + + // Load a 64-byte data chunk and XOR with CRC + VLM 0(R3), V1, V4 // 64-bytes into V1..V4 + + // Reflect the data if the CRC operation is in the bit-reflected domain + VPERM V1, V1, CONST_PERM_LE2BE, V1 + VPERM V2, V2, CONST_PERM_LE2BE, V2 + VPERM V3, V3, CONST_PERM_LE2BE, V3 + VPERM V4, V4, CONST_PERM_LE2BE, V4 + + VX V0, V1, V1 // V1 ^= CRC + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 + + // Check remaining buffer size and jump to proper folding method + CMP R4, $64 + BLT less_than_64bytes + +fold_64bytes_loop: + // Load the next 64-byte data chunk into V5 to V8 + VLM 0(R3), V5, V8 + VPERM V5, V5, CONST_PERM_LE2BE, V5 + VPERM V6, V6, CONST_PERM_LE2BE, V6 + VPERM V7, V7, CONST_PERM_LE2BE, V7 + VPERM V8, V8, CONST_PERM_LE2BE, V8 + + // Perform a GF(2) multiplication of the doublewords in V1 with + // the reduction constants in V0. The intermediate result is + // then folded (accumulated) with the next data chunk in V5 and + // stored in V1. Repeat this step for the register contents + // in V2, V3, and V4 respectively. + + VGFMAG CONST_R2R1, V1, V5, V1 + VGFMAG CONST_R2R1, V2, V6, V2 + VGFMAG CONST_R2R1, V3, V7, V3 + VGFMAG CONST_R2R1, V4, V8, V4 + + // Adjust buffer pointer and length for next loop + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 // LEN = LEN - 64 + + CMP R4, $64 + BGE fold_64bytes_loop + +less_than_64bytes: + // Fold V1 to V4 into a single 128-bit value in V1 + VGFMAG CONST_R4R3, V1, V2, V1 + VGFMAG CONST_R4R3, V1, V3, V1 + VGFMAG CONST_R4R3, V1, V4, V1 + + // Check whether to continue with 64-bit folding + CMP R4, $16 + BLT final_fold + +fold_16bytes_loop: + VL 0(R3), V2 // Load next data chunk + VPERM V2, V2, CONST_PERM_LE2BE, V2 + + VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk + + // Adjust buffer pointer and size for folding next data chunk + ADD $16, R3 + ADD $-16, R4 + + // Process remaining data chunks + CMP R4, $16 + BGE fold_16bytes_loop + +final_fold: + VLEIB $7, $0x40, V9 + VSRLB V9, CONST_R4R3, V0 + VLEIG $0, $1, V0 + + VGFMG V0, V1, V1 + + VLEIB $7, $0x20, V9 // Shift by words + VSRLB V9, V1, V2 // Store remaining bits in V2 + VUPLLF V1, V1 // Split rightmost doubleword + VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 + + // The input values to the Barret reduction are the degree-63 polynomial + // in V1 (R(x)), degree-32 generator polynomial, and the reduction + // constant u. The Barret reduction result is the CRC value of R(x) mod + // P(x). + // + // The Barret reduction algorithm is defined as: + // + // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u + // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) + // 3. C(x) = R(x) XOR T2(x) mod x^32 + // + // Note: To compensate the division by x^32, use the vector unpack + // instruction to move the leftmost word into the leftmost doubleword + // of the vector register. The rightmost doubleword is multiplied + // with zero to not contribute to the intermediate results. + + // T1(x) = floor( R(x) / x^32 ) GF2MUL u + VUPLLF V1, V2 + VGFMG CONST_RU_POLY, V2, V2 + + // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in + // V2 and XOR the intermediate result, T2(x), with the value in V1. + // The final result is in the rightmost word of V2. + + VUPLLF V2, V2 + VGFMAG CONST_CRC_POLY, V2, V1, V2 + +done: + VLGVF $2, V2, R2 + XOR $0xffffffff, R2 // NOTW R2 + MOVWZ R2, ret + 32(FP) + RET + +crash: + MOVD $0, (R0) // input size is less than 64-bytes + diff --git a/vendor/github.com/klauspost/crc32/crc32_table_ppc64le.s b/vendor/github.com/klauspost/crc32/crc32_table_ppc64le.s new file mode 100644 index 000000000000..1f3c1efda7f5 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_table_ppc64le.s @@ -0,0 +1,3285 @@ +// Code generated by "go run gen_const_ppc64le.go"; DO NOT EDIT. + +#include "textflag.h" + +// Reduce 262144 kbits to 1024 bits +// x^261184 mod p(x), x^261120 mod p(x) +DATA ·IEEEConst+0(SB)/8, $0x0000000099ea94a8 +DATA ·IEEEConst+8(SB)/8, $0x00000001651797d2 + +// x^260160 mod p(x), x^260096 mod p(x) +DATA ·IEEEConst+16(SB)/8, $0x00000000945a8420 +DATA ·IEEEConst+24(SB)/8, $0x0000000021e0d56c + +// x^259136 mod p(x), x^259072 mod p(x) +DATA ·IEEEConst+32(SB)/8, $0x0000000030762706 +DATA ·IEEEConst+40(SB)/8, $0x000000000f95ecaa + +// x^258112 mod p(x), x^258048 mod p(x) +DATA ·IEEEConst+48(SB)/8, $0x00000001a52fc582 +DATA ·IEEEConst+56(SB)/8, $0x00000001ebd224ac + +// x^257088 mod p(x), x^257024 mod p(x) +DATA ·IEEEConst+64(SB)/8, $0x00000001a4a7167a +DATA ·IEEEConst+72(SB)/8, $0x000000000ccb97ca + +// x^256064 mod p(x), x^256000 mod p(x) +DATA ·IEEEConst+80(SB)/8, $0x000000000c18249a +DATA ·IEEEConst+88(SB)/8, $0x00000001006ec8a8 + +// x^255040 mod p(x), x^254976 mod p(x) +DATA ·IEEEConst+96(SB)/8, $0x00000000a924ae7c +DATA ·IEEEConst+104(SB)/8, $0x000000014f58f196 + +// x^254016 mod p(x), x^253952 mod p(x) +DATA ·IEEEConst+112(SB)/8, $0x00000001e12ccc12 +DATA ·IEEEConst+120(SB)/8, $0x00000001a7192ca6 + +// x^252992 mod p(x), x^252928 mod p(x) +DATA ·IEEEConst+128(SB)/8, $0x00000000a0b9d4ac +DATA ·IEEEConst+136(SB)/8, $0x000000019a64bab2 + +// x^251968 mod p(x), x^251904 mod p(x) +DATA ·IEEEConst+144(SB)/8, $0x0000000095e8ddfe +DATA ·IEEEConst+152(SB)/8, $0x0000000014f4ed2e + +// x^250944 mod p(x), x^250880 mod p(x) +DATA ·IEEEConst+160(SB)/8, $0x00000000233fddc4 +DATA ·IEEEConst+168(SB)/8, $0x000000011092b6a2 + +// x^249920 mod p(x), x^249856 mod p(x) +DATA ·IEEEConst+176(SB)/8, $0x00000001b4529b62 +DATA ·IEEEConst+184(SB)/8, $0x00000000c8a1629c + +// x^248896 mod p(x), x^248832 mod p(x) +DATA ·IEEEConst+192(SB)/8, $0x00000001a7fa0e64 +DATA ·IEEEConst+200(SB)/8, $0x000000017bf32e8e + +// x^247872 mod p(x), x^247808 mod p(x) +DATA ·IEEEConst+208(SB)/8, $0x00000001b5334592 +DATA ·IEEEConst+216(SB)/8, $0x00000001f8cc6582 + +// x^246848 mod p(x), x^246784 mod p(x) +DATA ·IEEEConst+224(SB)/8, $0x000000011f8ee1b4 +DATA ·IEEEConst+232(SB)/8, $0x000000008631ddf0 + +// x^245824 mod p(x), x^245760 mod p(x) +DATA ·IEEEConst+240(SB)/8, $0x000000006252e632 +DATA ·IEEEConst+248(SB)/8, $0x000000007e5a76d0 + +// x^244800 mod p(x), x^244736 mod p(x) +DATA ·IEEEConst+256(SB)/8, $0x00000000ab973e84 +DATA ·IEEEConst+264(SB)/8, $0x000000002b09b31c + +// x^243776 mod p(x), x^243712 mod p(x) +DATA ·IEEEConst+272(SB)/8, $0x000000007734f5ec +DATA ·IEEEConst+280(SB)/8, $0x00000001b2df1f84 + +// x^242752 mod p(x), x^242688 mod p(x) +DATA ·IEEEConst+288(SB)/8, $0x000000007c547798 +DATA ·IEEEConst+296(SB)/8, $0x00000001d6f56afc + +// x^241728 mod p(x), x^241664 mod p(x) +DATA ·IEEEConst+304(SB)/8, $0x000000007ec40210 +DATA ·IEEEConst+312(SB)/8, $0x00000001b9b5e70c + +// x^240704 mod p(x), x^240640 mod p(x) +DATA ·IEEEConst+320(SB)/8, $0x00000001ab1695a8 +DATA ·IEEEConst+328(SB)/8, $0x0000000034b626d2 + +// x^239680 mod p(x), x^239616 mod p(x) +DATA ·IEEEConst+336(SB)/8, $0x0000000090494bba +DATA ·IEEEConst+344(SB)/8, $0x000000014c53479a + +// x^238656 mod p(x), x^238592 mod p(x) +DATA ·IEEEConst+352(SB)/8, $0x00000001123fb816 +DATA ·IEEEConst+360(SB)/8, $0x00000001a6d179a4 + +// x^237632 mod p(x), x^237568 mod p(x) +DATA ·IEEEConst+368(SB)/8, $0x00000001e188c74c +DATA ·IEEEConst+376(SB)/8, $0x000000015abd16b4 + +// x^236608 mod p(x), x^236544 mod p(x) +DATA ·IEEEConst+384(SB)/8, $0x00000001c2d3451c +DATA ·IEEEConst+392(SB)/8, $0x00000000018f9852 + +// x^235584 mod p(x), x^235520 mod p(x) +DATA ·IEEEConst+400(SB)/8, $0x00000000f55cf1ca +DATA ·IEEEConst+408(SB)/8, $0x000000001fb3084a + +// x^234560 mod p(x), x^234496 mod p(x) +DATA ·IEEEConst+416(SB)/8, $0x00000001a0531540 +DATA ·IEEEConst+424(SB)/8, $0x00000000c53dfb04 + +// x^233536 mod p(x), x^233472 mod p(x) +DATA ·IEEEConst+432(SB)/8, $0x0000000132cd7ebc +DATA ·IEEEConst+440(SB)/8, $0x00000000e10c9ad6 + +// x^232512 mod p(x), x^232448 mod p(x) +DATA ·IEEEConst+448(SB)/8, $0x0000000073ab7f36 +DATA ·IEEEConst+456(SB)/8, $0x0000000025aa994a + +// x^231488 mod p(x), x^231424 mod p(x) +DATA ·IEEEConst+464(SB)/8, $0x0000000041aed1c2 +DATA ·IEEEConst+472(SB)/8, $0x00000000fa3a74c4 + +// x^230464 mod p(x), x^230400 mod p(x) +DATA ·IEEEConst+480(SB)/8, $0x0000000136c53800 +DATA ·IEEEConst+488(SB)/8, $0x0000000033eb3f40 + +// x^229440 mod p(x), x^229376 mod p(x) +DATA ·IEEEConst+496(SB)/8, $0x0000000126835a30 +DATA ·IEEEConst+504(SB)/8, $0x000000017193f296 + +// x^228416 mod p(x), x^228352 mod p(x) +DATA ·IEEEConst+512(SB)/8, $0x000000006241b502 +DATA ·IEEEConst+520(SB)/8, $0x0000000043f6c86a + +// x^227392 mod p(x), x^227328 mod p(x) +DATA ·IEEEConst+528(SB)/8, $0x00000000d5196ad4 +DATA ·IEEEConst+536(SB)/8, $0x000000016b513ec6 + +// x^226368 mod p(x), x^226304 mod p(x) +DATA ·IEEEConst+544(SB)/8, $0x000000009cfa769a +DATA ·IEEEConst+552(SB)/8, $0x00000000c8f25b4e + +// x^225344 mod p(x), x^225280 mod p(x) +DATA ·IEEEConst+560(SB)/8, $0x00000000920e5df4 +DATA ·IEEEConst+568(SB)/8, $0x00000001a45048ec + +// x^224320 mod p(x), x^224256 mod p(x) +DATA ·IEEEConst+576(SB)/8, $0x0000000169dc310e +DATA ·IEEEConst+584(SB)/8, $0x000000000c441004 + +// x^223296 mod p(x), x^223232 mod p(x) +DATA ·IEEEConst+592(SB)/8, $0x0000000009fc331c +DATA ·IEEEConst+600(SB)/8, $0x000000000e17cad6 + +// x^222272 mod p(x), x^222208 mod p(x) +DATA ·IEEEConst+608(SB)/8, $0x000000010d94a81e +DATA ·IEEEConst+616(SB)/8, $0x00000001253ae964 + +// x^221248 mod p(x), x^221184 mod p(x) +DATA ·IEEEConst+624(SB)/8, $0x0000000027a20ab2 +DATA ·IEEEConst+632(SB)/8, $0x00000001d7c88ebc + +// x^220224 mod p(x), x^220160 mod p(x) +DATA ·IEEEConst+640(SB)/8, $0x0000000114f87504 +DATA ·IEEEConst+648(SB)/8, $0x00000001e7ca913a + +// x^219200 mod p(x), x^219136 mod p(x) +DATA ·IEEEConst+656(SB)/8, $0x000000004b076d96 +DATA ·IEEEConst+664(SB)/8, $0x0000000033ed078a + +// x^218176 mod p(x), x^218112 mod p(x) +DATA ·IEEEConst+672(SB)/8, $0x00000000da4d1e74 +DATA ·IEEEConst+680(SB)/8, $0x00000000e1839c78 + +// x^217152 mod p(x), x^217088 mod p(x) +DATA ·IEEEConst+688(SB)/8, $0x000000001b81f672 +DATA ·IEEEConst+696(SB)/8, $0x00000001322b267e + +// x^216128 mod p(x), x^216064 mod p(x) +DATA ·IEEEConst+704(SB)/8, $0x000000009367c988 +DATA ·IEEEConst+712(SB)/8, $0x00000000638231b6 + +// x^215104 mod p(x), x^215040 mod p(x) +DATA ·IEEEConst+720(SB)/8, $0x00000001717214ca +DATA ·IEEEConst+728(SB)/8, $0x00000001ee7f16f4 + +// x^214080 mod p(x), x^214016 mod p(x) +DATA ·IEEEConst+736(SB)/8, $0x000000009f47d820 +DATA ·IEEEConst+744(SB)/8, $0x0000000117d9924a + +// x^213056 mod p(x), x^212992 mod p(x) +DATA ·IEEEConst+752(SB)/8, $0x000000010d9a47d2 +DATA ·IEEEConst+760(SB)/8, $0x00000000e1a9e0c4 + +// x^212032 mod p(x), x^211968 mod p(x) +DATA ·IEEEConst+768(SB)/8, $0x00000000a696c58c +DATA ·IEEEConst+776(SB)/8, $0x00000001403731dc + +// x^211008 mod p(x), x^210944 mod p(x) +DATA ·IEEEConst+784(SB)/8, $0x000000002aa28ec6 +DATA ·IEEEConst+792(SB)/8, $0x00000001a5ea9682 + +// x^209984 mod p(x), x^209920 mod p(x) +DATA ·IEEEConst+800(SB)/8, $0x00000001fe18fd9a +DATA ·IEEEConst+808(SB)/8, $0x0000000101c5c578 + +// x^208960 mod p(x), x^208896 mod p(x) +DATA ·IEEEConst+816(SB)/8, $0x000000019d4fc1ae +DATA ·IEEEConst+824(SB)/8, $0x00000000dddf6494 + +// x^207936 mod p(x), x^207872 mod p(x) +DATA ·IEEEConst+832(SB)/8, $0x00000001ba0e3dea +DATA ·IEEEConst+840(SB)/8, $0x00000000f1c3db28 + +// x^206912 mod p(x), x^206848 mod p(x) +DATA ·IEEEConst+848(SB)/8, $0x0000000074b59a5e +DATA ·IEEEConst+856(SB)/8, $0x000000013112fb9c + +// x^205888 mod p(x), x^205824 mod p(x) +DATA ·IEEEConst+864(SB)/8, $0x00000000f2b5ea98 +DATA ·IEEEConst+872(SB)/8, $0x00000000b680b906 + +// x^204864 mod p(x), x^204800 mod p(x) +DATA ·IEEEConst+880(SB)/8, $0x0000000187132676 +DATA ·IEEEConst+888(SB)/8, $0x000000001a282932 + +// x^203840 mod p(x), x^203776 mod p(x) +DATA ·IEEEConst+896(SB)/8, $0x000000010a8c6ad4 +DATA ·IEEEConst+904(SB)/8, $0x0000000089406e7e + +// x^202816 mod p(x), x^202752 mod p(x) +DATA ·IEEEConst+912(SB)/8, $0x00000001e21dfe70 +DATA ·IEEEConst+920(SB)/8, $0x00000001def6be8c + +// x^201792 mod p(x), x^201728 mod p(x) +DATA ·IEEEConst+928(SB)/8, $0x00000001da0050e4 +DATA ·IEEEConst+936(SB)/8, $0x0000000075258728 + +// x^200768 mod p(x), x^200704 mod p(x) +DATA ·IEEEConst+944(SB)/8, $0x00000000772172ae +DATA ·IEEEConst+952(SB)/8, $0x000000019536090a + +// x^199744 mod p(x), x^199680 mod p(x) +DATA ·IEEEConst+960(SB)/8, $0x00000000e47724aa +DATA ·IEEEConst+968(SB)/8, $0x00000000f2455bfc + +// x^198720 mod p(x), x^198656 mod p(x) +DATA ·IEEEConst+976(SB)/8, $0x000000003cd63ac4 +DATA ·IEEEConst+984(SB)/8, $0x000000018c40baf4 + +// x^197696 mod p(x), x^197632 mod p(x) +DATA ·IEEEConst+992(SB)/8, $0x00000001bf47d352 +DATA ·IEEEConst+1000(SB)/8, $0x000000004cd390d4 + +// x^196672 mod p(x), x^196608 mod p(x) +DATA ·IEEEConst+1008(SB)/8, $0x000000018dc1d708 +DATA ·IEEEConst+1016(SB)/8, $0x00000001e4ece95a + +// x^195648 mod p(x), x^195584 mod p(x) +DATA ·IEEEConst+1024(SB)/8, $0x000000002d4620a4 +DATA ·IEEEConst+1032(SB)/8, $0x000000001a3ee918 + +// x^194624 mod p(x), x^194560 mod p(x) +DATA ·IEEEConst+1040(SB)/8, $0x0000000058fd1740 +DATA ·IEEEConst+1048(SB)/8, $0x000000007c652fb8 + +// x^193600 mod p(x), x^193536 mod p(x) +DATA ·IEEEConst+1056(SB)/8, $0x00000000dadd9bfc +DATA ·IEEEConst+1064(SB)/8, $0x000000011c67842c + +// x^192576 mod p(x), x^192512 mod p(x) +DATA ·IEEEConst+1072(SB)/8, $0x00000001ea2140be +DATA ·IEEEConst+1080(SB)/8, $0x00000000254f759c + +// x^191552 mod p(x), x^191488 mod p(x) +DATA ·IEEEConst+1088(SB)/8, $0x000000009de128ba +DATA ·IEEEConst+1096(SB)/8, $0x000000007ece94ca + +// x^190528 mod p(x), x^190464 mod p(x) +DATA ·IEEEConst+1104(SB)/8, $0x000000013ac3aa8e +DATA ·IEEEConst+1112(SB)/8, $0x0000000038f258c2 + +// x^189504 mod p(x), x^189440 mod p(x) +DATA ·IEEEConst+1120(SB)/8, $0x0000000099980562 +DATA ·IEEEConst+1128(SB)/8, $0x00000001cdf17b00 + +// x^188480 mod p(x), x^188416 mod p(x) +DATA ·IEEEConst+1136(SB)/8, $0x00000001c1579c86 +DATA ·IEEEConst+1144(SB)/8, $0x000000011f882c16 + +// x^187456 mod p(x), x^187392 mod p(x) +DATA ·IEEEConst+1152(SB)/8, $0x0000000068dbbf94 +DATA ·IEEEConst+1160(SB)/8, $0x0000000100093fc8 + +// x^186432 mod p(x), x^186368 mod p(x) +DATA ·IEEEConst+1168(SB)/8, $0x000000004509fb04 +DATA ·IEEEConst+1176(SB)/8, $0x00000001cd684f16 + +// x^185408 mod p(x), x^185344 mod p(x) +DATA ·IEEEConst+1184(SB)/8, $0x00000001202f6398 +DATA ·IEEEConst+1192(SB)/8, $0x000000004bc6a70a + +// x^184384 mod p(x), x^184320 mod p(x) +DATA ·IEEEConst+1200(SB)/8, $0x000000013aea243e +DATA ·IEEEConst+1208(SB)/8, $0x000000004fc7e8e4 + +// x^183360 mod p(x), x^183296 mod p(x) +DATA ·IEEEConst+1216(SB)/8, $0x00000001b4052ae6 +DATA ·IEEEConst+1224(SB)/8, $0x0000000130103f1c + +// x^182336 mod p(x), x^182272 mod p(x) +DATA ·IEEEConst+1232(SB)/8, $0x00000001cd2a0ae8 +DATA ·IEEEConst+1240(SB)/8, $0x0000000111b0024c + +// x^181312 mod p(x), x^181248 mod p(x) +DATA ·IEEEConst+1248(SB)/8, $0x00000001fe4aa8b4 +DATA ·IEEEConst+1256(SB)/8, $0x000000010b3079da + +// x^180288 mod p(x), x^180224 mod p(x) +DATA ·IEEEConst+1264(SB)/8, $0x00000001d1559a42 +DATA ·IEEEConst+1272(SB)/8, $0x000000010192bcc2 + +// x^179264 mod p(x), x^179200 mod p(x) +DATA ·IEEEConst+1280(SB)/8, $0x00000001f3e05ecc +DATA ·IEEEConst+1288(SB)/8, $0x0000000074838d50 + +// x^178240 mod p(x), x^178176 mod p(x) +DATA ·IEEEConst+1296(SB)/8, $0x0000000104ddd2cc +DATA ·IEEEConst+1304(SB)/8, $0x000000001b20f520 + +// x^177216 mod p(x), x^177152 mod p(x) +DATA ·IEEEConst+1312(SB)/8, $0x000000015393153c +DATA ·IEEEConst+1320(SB)/8, $0x0000000050c3590a + +// x^176192 mod p(x), x^176128 mod p(x) +DATA ·IEEEConst+1328(SB)/8, $0x0000000057e942c6 +DATA ·IEEEConst+1336(SB)/8, $0x00000000b41cac8e + +// x^175168 mod p(x), x^175104 mod p(x) +DATA ·IEEEConst+1344(SB)/8, $0x000000012c633850 +DATA ·IEEEConst+1352(SB)/8, $0x000000000c72cc78 + +// x^174144 mod p(x), x^174080 mod p(x) +DATA ·IEEEConst+1360(SB)/8, $0x00000000ebcaae4c +DATA ·IEEEConst+1368(SB)/8, $0x0000000030cdb032 + +// x^173120 mod p(x), x^173056 mod p(x) +DATA ·IEEEConst+1376(SB)/8, $0x000000013ee532a6 +DATA ·IEEEConst+1384(SB)/8, $0x000000013e09fc32 + +// x^172096 mod p(x), x^172032 mod p(x) +DATA ·IEEEConst+1392(SB)/8, $0x00000001bf0cbc7e +DATA ·IEEEConst+1400(SB)/8, $0x000000001ed624d2 + +// x^171072 mod p(x), x^171008 mod p(x) +DATA ·IEEEConst+1408(SB)/8, $0x00000000d50b7a5a +DATA ·IEEEConst+1416(SB)/8, $0x00000000781aee1a + +// x^170048 mod p(x), x^169984 mod p(x) +DATA ·IEEEConst+1424(SB)/8, $0x0000000002fca6e8 +DATA ·IEEEConst+1432(SB)/8, $0x00000001c4d8348c + +// x^169024 mod p(x), x^168960 mod p(x) +DATA ·IEEEConst+1440(SB)/8, $0x000000007af40044 +DATA ·IEEEConst+1448(SB)/8, $0x0000000057a40336 + +// x^168000 mod p(x), x^167936 mod p(x) +DATA ·IEEEConst+1456(SB)/8, $0x0000000016178744 +DATA ·IEEEConst+1464(SB)/8, $0x0000000085544940 + +// x^166976 mod p(x), x^166912 mod p(x) +DATA ·IEEEConst+1472(SB)/8, $0x000000014c177458 +DATA ·IEEEConst+1480(SB)/8, $0x000000019cd21e80 + +// x^165952 mod p(x), x^165888 mod p(x) +DATA ·IEEEConst+1488(SB)/8, $0x000000011b6ddf04 +DATA ·IEEEConst+1496(SB)/8, $0x000000013eb95bc0 + +// x^164928 mod p(x), x^164864 mod p(x) +DATA ·IEEEConst+1504(SB)/8, $0x00000001f3e29ccc +DATA ·IEEEConst+1512(SB)/8, $0x00000001dfc9fdfc + +// x^163904 mod p(x), x^163840 mod p(x) +DATA ·IEEEConst+1520(SB)/8, $0x0000000135ae7562 +DATA ·IEEEConst+1528(SB)/8, $0x00000000cd028bc2 + +// x^162880 mod p(x), x^162816 mod p(x) +DATA ·IEEEConst+1536(SB)/8, $0x0000000190ef812c +DATA ·IEEEConst+1544(SB)/8, $0x0000000090db8c44 + +// x^161856 mod p(x), x^161792 mod p(x) +DATA ·IEEEConst+1552(SB)/8, $0x0000000067a2c786 +DATA ·IEEEConst+1560(SB)/8, $0x000000010010a4ce + +// x^160832 mod p(x), x^160768 mod p(x) +DATA ·IEEEConst+1568(SB)/8, $0x0000000048b9496c +DATA ·IEEEConst+1576(SB)/8, $0x00000001c8f4c72c + +// x^159808 mod p(x), x^159744 mod p(x) +DATA ·IEEEConst+1584(SB)/8, $0x000000015a422de6 +DATA ·IEEEConst+1592(SB)/8, $0x000000001c26170c + +// x^158784 mod p(x), x^158720 mod p(x) +DATA ·IEEEConst+1600(SB)/8, $0x00000001ef0e3640 +DATA ·IEEEConst+1608(SB)/8, $0x00000000e3fccf68 + +// x^157760 mod p(x), x^157696 mod p(x) +DATA ·IEEEConst+1616(SB)/8, $0x00000001006d2d26 +DATA ·IEEEConst+1624(SB)/8, $0x00000000d513ed24 + +// x^156736 mod p(x), x^156672 mod p(x) +DATA ·IEEEConst+1632(SB)/8, $0x00000001170d56d6 +DATA ·IEEEConst+1640(SB)/8, $0x00000000141beada + +// x^155712 mod p(x), x^155648 mod p(x) +DATA ·IEEEConst+1648(SB)/8, $0x00000000a5fb613c +DATA ·IEEEConst+1656(SB)/8, $0x000000011071aea0 + +// x^154688 mod p(x), x^154624 mod p(x) +DATA ·IEEEConst+1664(SB)/8, $0x0000000040bbf7fc +DATA ·IEEEConst+1672(SB)/8, $0x000000012e19080a + +// x^153664 mod p(x), x^153600 mod p(x) +DATA ·IEEEConst+1680(SB)/8, $0x000000016ac3a5b2 +DATA ·IEEEConst+1688(SB)/8, $0x0000000100ecf826 + +// x^152640 mod p(x), x^152576 mod p(x) +DATA ·IEEEConst+1696(SB)/8, $0x00000000abf16230 +DATA ·IEEEConst+1704(SB)/8, $0x0000000069b09412 + +// x^151616 mod p(x), x^151552 mod p(x) +DATA ·IEEEConst+1712(SB)/8, $0x00000001ebe23fac +DATA ·IEEEConst+1720(SB)/8, $0x0000000122297bac + +// x^150592 mod p(x), x^150528 mod p(x) +DATA ·IEEEConst+1728(SB)/8, $0x000000008b6a0894 +DATA ·IEEEConst+1736(SB)/8, $0x00000000e9e4b068 + +// x^149568 mod p(x), x^149504 mod p(x) +DATA ·IEEEConst+1744(SB)/8, $0x00000001288ea478 +DATA ·IEEEConst+1752(SB)/8, $0x000000004b38651a + +// x^148544 mod p(x), x^148480 mod p(x) +DATA ·IEEEConst+1760(SB)/8, $0x000000016619c442 +DATA ·IEEEConst+1768(SB)/8, $0x00000001468360e2 + +// x^147520 mod p(x), x^147456 mod p(x) +DATA ·IEEEConst+1776(SB)/8, $0x0000000086230038 +DATA ·IEEEConst+1784(SB)/8, $0x00000000121c2408 + +// x^146496 mod p(x), x^146432 mod p(x) +DATA ·IEEEConst+1792(SB)/8, $0x000000017746a756 +DATA ·IEEEConst+1800(SB)/8, $0x00000000da7e7d08 + +// x^145472 mod p(x), x^145408 mod p(x) +DATA ·IEEEConst+1808(SB)/8, $0x0000000191b8f8f8 +DATA ·IEEEConst+1816(SB)/8, $0x00000001058d7652 + +// x^144448 mod p(x), x^144384 mod p(x) +DATA ·IEEEConst+1824(SB)/8, $0x000000008e167708 +DATA ·IEEEConst+1832(SB)/8, $0x000000014a098a90 + +// x^143424 mod p(x), x^143360 mod p(x) +DATA ·IEEEConst+1840(SB)/8, $0x0000000148b22d54 +DATA ·IEEEConst+1848(SB)/8, $0x0000000020dbe72e + +// x^142400 mod p(x), x^142336 mod p(x) +DATA ·IEEEConst+1856(SB)/8, $0x0000000044ba2c3c +DATA ·IEEEConst+1864(SB)/8, $0x000000011e7323e8 + +// x^141376 mod p(x), x^141312 mod p(x) +DATA ·IEEEConst+1872(SB)/8, $0x00000000b54d2b52 +DATA ·IEEEConst+1880(SB)/8, $0x00000000d5d4bf94 + +// x^140352 mod p(x), x^140288 mod p(x) +DATA ·IEEEConst+1888(SB)/8, $0x0000000005a4fd8a +DATA ·IEEEConst+1896(SB)/8, $0x0000000199d8746c + +// x^139328 mod p(x), x^139264 mod p(x) +DATA ·IEEEConst+1904(SB)/8, $0x0000000139f9fc46 +DATA ·IEEEConst+1912(SB)/8, $0x00000000ce9ca8a0 + +// x^138304 mod p(x), x^138240 mod p(x) +DATA ·IEEEConst+1920(SB)/8, $0x000000015a1fa824 +DATA ·IEEEConst+1928(SB)/8, $0x00000000136edece + +// x^137280 mod p(x), x^137216 mod p(x) +DATA ·IEEEConst+1936(SB)/8, $0x000000000a61ae4c +DATA ·IEEEConst+1944(SB)/8, $0x000000019b92a068 + +// x^136256 mod p(x), x^136192 mod p(x) +DATA ·IEEEConst+1952(SB)/8, $0x0000000145e9113e +DATA ·IEEEConst+1960(SB)/8, $0x0000000071d62206 + +// x^135232 mod p(x), x^135168 mod p(x) +DATA ·IEEEConst+1968(SB)/8, $0x000000006a348448 +DATA ·IEEEConst+1976(SB)/8, $0x00000000dfc50158 + +// x^134208 mod p(x), x^134144 mod p(x) +DATA ·IEEEConst+1984(SB)/8, $0x000000004d80a08c +DATA ·IEEEConst+1992(SB)/8, $0x00000001517626bc + +// x^133184 mod p(x), x^133120 mod p(x) +DATA ·IEEEConst+2000(SB)/8, $0x000000014b6837a0 +DATA ·IEEEConst+2008(SB)/8, $0x0000000148d1e4fa + +// x^132160 mod p(x), x^132096 mod p(x) +DATA ·IEEEConst+2016(SB)/8, $0x000000016896a7fc +DATA ·IEEEConst+2024(SB)/8, $0x0000000094d8266e + +// x^131136 mod p(x), x^131072 mod p(x) +DATA ·IEEEConst+2032(SB)/8, $0x000000014f187140 +DATA ·IEEEConst+2040(SB)/8, $0x00000000606c5e34 + +// x^130112 mod p(x), x^130048 mod p(x) +DATA ·IEEEConst+2048(SB)/8, $0x000000019581b9da +DATA ·IEEEConst+2056(SB)/8, $0x000000019766beaa + +// x^129088 mod p(x), x^129024 mod p(x) +DATA ·IEEEConst+2064(SB)/8, $0x00000001091bc984 +DATA ·IEEEConst+2072(SB)/8, $0x00000001d80c506c + +// x^128064 mod p(x), x^128000 mod p(x) +DATA ·IEEEConst+2080(SB)/8, $0x000000001067223c +DATA ·IEEEConst+2088(SB)/8, $0x000000001e73837c + +// x^127040 mod p(x), x^126976 mod p(x) +DATA ·IEEEConst+2096(SB)/8, $0x00000001ab16ea02 +DATA ·IEEEConst+2104(SB)/8, $0x0000000064d587de + +// x^126016 mod p(x), x^125952 mod p(x) +DATA ·IEEEConst+2112(SB)/8, $0x000000013c4598a8 +DATA ·IEEEConst+2120(SB)/8, $0x00000000f4a507b0 + +// x^124992 mod p(x), x^124928 mod p(x) +DATA ·IEEEConst+2128(SB)/8, $0x00000000b3735430 +DATA ·IEEEConst+2136(SB)/8, $0x0000000040e342fc + +// x^123968 mod p(x), x^123904 mod p(x) +DATA ·IEEEConst+2144(SB)/8, $0x00000001bb3fc0c0 +DATA ·IEEEConst+2152(SB)/8, $0x00000001d5ad9c3a + +// x^122944 mod p(x), x^122880 mod p(x) +DATA ·IEEEConst+2160(SB)/8, $0x00000001570ae19c +DATA ·IEEEConst+2168(SB)/8, $0x0000000094a691a4 + +// x^121920 mod p(x), x^121856 mod p(x) +DATA ·IEEEConst+2176(SB)/8, $0x00000001ea910712 +DATA ·IEEEConst+2184(SB)/8, $0x00000001271ecdfa + +// x^120896 mod p(x), x^120832 mod p(x) +DATA ·IEEEConst+2192(SB)/8, $0x0000000167127128 +DATA ·IEEEConst+2200(SB)/8, $0x000000009e54475a + +// x^119872 mod p(x), x^119808 mod p(x) +DATA ·IEEEConst+2208(SB)/8, $0x0000000019e790a2 +DATA ·IEEEConst+2216(SB)/8, $0x00000000c9c099ee + +// x^118848 mod p(x), x^118784 mod p(x) +DATA ·IEEEConst+2224(SB)/8, $0x000000003788f710 +DATA ·IEEEConst+2232(SB)/8, $0x000000009a2f736c + +// x^117824 mod p(x), x^117760 mod p(x) +DATA ·IEEEConst+2240(SB)/8, $0x00000001682a160e +DATA ·IEEEConst+2248(SB)/8, $0x00000000bb9f4996 + +// x^116800 mod p(x), x^116736 mod p(x) +DATA ·IEEEConst+2256(SB)/8, $0x000000007f0ebd2e +DATA ·IEEEConst+2264(SB)/8, $0x00000001db688050 + +// x^115776 mod p(x), x^115712 mod p(x) +DATA ·IEEEConst+2272(SB)/8, $0x000000002b032080 +DATA ·IEEEConst+2280(SB)/8, $0x00000000e9b10af4 + +// x^114752 mod p(x), x^114688 mod p(x) +DATA ·IEEEConst+2288(SB)/8, $0x00000000cfd1664a +DATA ·IEEEConst+2296(SB)/8, $0x000000012d4545e4 + +// x^113728 mod p(x), x^113664 mod p(x) +DATA ·IEEEConst+2304(SB)/8, $0x00000000aa1181c2 +DATA ·IEEEConst+2312(SB)/8, $0x000000000361139c + +// x^112704 mod p(x), x^112640 mod p(x) +DATA ·IEEEConst+2320(SB)/8, $0x00000000ddd08002 +DATA ·IEEEConst+2328(SB)/8, $0x00000001a5a1a3a8 + +// x^111680 mod p(x), x^111616 mod p(x) +DATA ·IEEEConst+2336(SB)/8, $0x00000000e8dd0446 +DATA ·IEEEConst+2344(SB)/8, $0x000000006844e0b0 + +// x^110656 mod p(x), x^110592 mod p(x) +DATA ·IEEEConst+2352(SB)/8, $0x00000001bbd94a00 +DATA ·IEEEConst+2360(SB)/8, $0x00000000c3762f28 + +// x^109632 mod p(x), x^109568 mod p(x) +DATA ·IEEEConst+2368(SB)/8, $0x00000000ab6cd180 +DATA ·IEEEConst+2376(SB)/8, $0x00000001d26287a2 + +// x^108608 mod p(x), x^108544 mod p(x) +DATA ·IEEEConst+2384(SB)/8, $0x0000000031803ce2 +DATA ·IEEEConst+2392(SB)/8, $0x00000001f6f0bba8 + +// x^107584 mod p(x), x^107520 mod p(x) +DATA ·IEEEConst+2400(SB)/8, $0x0000000024f40b0c +DATA ·IEEEConst+2408(SB)/8, $0x000000002ffabd62 + +// x^106560 mod p(x), x^106496 mod p(x) +DATA ·IEEEConst+2416(SB)/8, $0x00000001ba1d9834 +DATA ·IEEEConst+2424(SB)/8, $0x00000000fb4516b8 + +// x^105536 mod p(x), x^105472 mod p(x) +DATA ·IEEEConst+2432(SB)/8, $0x0000000104de61aa +DATA ·IEEEConst+2440(SB)/8, $0x000000018cfa961c + +// x^104512 mod p(x), x^104448 mod p(x) +DATA ·IEEEConst+2448(SB)/8, $0x0000000113e40d46 +DATA ·IEEEConst+2456(SB)/8, $0x000000019e588d52 + +// x^103488 mod p(x), x^103424 mod p(x) +DATA ·IEEEConst+2464(SB)/8, $0x00000001415598a0 +DATA ·IEEEConst+2472(SB)/8, $0x00000001180f0bbc + +// x^102464 mod p(x), x^102400 mod p(x) +DATA ·IEEEConst+2480(SB)/8, $0x00000000bf6c8c90 +DATA ·IEEEConst+2488(SB)/8, $0x00000000e1d9177a + +// x^101440 mod p(x), x^101376 mod p(x) +DATA ·IEEEConst+2496(SB)/8, $0x00000001788b0504 +DATA ·IEEEConst+2504(SB)/8, $0x0000000105abc27c + +// x^100416 mod p(x), x^100352 mod p(x) +DATA ·IEEEConst+2512(SB)/8, $0x0000000038385d02 +DATA ·IEEEConst+2520(SB)/8, $0x00000000972e4a58 + +// x^99392 mod p(x), x^99328 mod p(x) +DATA ·IEEEConst+2528(SB)/8, $0x00000001b6c83844 +DATA ·IEEEConst+2536(SB)/8, $0x0000000183499a5e + +// x^98368 mod p(x), x^98304 mod p(x) +DATA ·IEEEConst+2544(SB)/8, $0x0000000051061a8a +DATA ·IEEEConst+2552(SB)/8, $0x00000001c96a8cca + +// x^97344 mod p(x), x^97280 mod p(x) +DATA ·IEEEConst+2560(SB)/8, $0x000000017351388a +DATA ·IEEEConst+2568(SB)/8, $0x00000001a1a5b60c + +// x^96320 mod p(x), x^96256 mod p(x) +DATA ·IEEEConst+2576(SB)/8, $0x0000000132928f92 +DATA ·IEEEConst+2584(SB)/8, $0x00000000e4b6ac9c + +// x^95296 mod p(x), x^95232 mod p(x) +DATA ·IEEEConst+2592(SB)/8, $0x00000000e6b4f48a +DATA ·IEEEConst+2600(SB)/8, $0x00000001807e7f5a + +// x^94272 mod p(x), x^94208 mod p(x) +DATA ·IEEEConst+2608(SB)/8, $0x0000000039d15e90 +DATA ·IEEEConst+2616(SB)/8, $0x000000017a7e3bc8 + +// x^93248 mod p(x), x^93184 mod p(x) +DATA ·IEEEConst+2624(SB)/8, $0x00000000312d6074 +DATA ·IEEEConst+2632(SB)/8, $0x00000000d73975da + +// x^92224 mod p(x), x^92160 mod p(x) +DATA ·IEEEConst+2640(SB)/8, $0x000000017bbb2cc4 +DATA ·IEEEConst+2648(SB)/8, $0x000000017375d038 + +// x^91200 mod p(x), x^91136 mod p(x) +DATA ·IEEEConst+2656(SB)/8, $0x000000016ded3e18 +DATA ·IEEEConst+2664(SB)/8, $0x00000000193680bc + +// x^90176 mod p(x), x^90112 mod p(x) +DATA ·IEEEConst+2672(SB)/8, $0x00000000f1638b16 +DATA ·IEEEConst+2680(SB)/8, $0x00000000999b06f6 + +// x^89152 mod p(x), x^89088 mod p(x) +DATA ·IEEEConst+2688(SB)/8, $0x00000001d38b9ecc +DATA ·IEEEConst+2696(SB)/8, $0x00000001f685d2b8 + +// x^88128 mod p(x), x^88064 mod p(x) +DATA ·IEEEConst+2704(SB)/8, $0x000000018b8d09dc +DATA ·IEEEConst+2712(SB)/8, $0x00000001f4ecbed2 + +// x^87104 mod p(x), x^87040 mod p(x) +DATA ·IEEEConst+2720(SB)/8, $0x00000000e7bc27d2 +DATA ·IEEEConst+2728(SB)/8, $0x00000000ba16f1a0 + +// x^86080 mod p(x), x^86016 mod p(x) +DATA ·IEEEConst+2736(SB)/8, $0x00000000275e1e96 +DATA ·IEEEConst+2744(SB)/8, $0x0000000115aceac4 + +// x^85056 mod p(x), x^84992 mod p(x) +DATA ·IEEEConst+2752(SB)/8, $0x00000000e2e3031e +DATA ·IEEEConst+2760(SB)/8, $0x00000001aeff6292 + +// x^84032 mod p(x), x^83968 mod p(x) +DATA ·IEEEConst+2768(SB)/8, $0x00000001041c84d8 +DATA ·IEEEConst+2776(SB)/8, $0x000000009640124c + +// x^83008 mod p(x), x^82944 mod p(x) +DATA ·IEEEConst+2784(SB)/8, $0x00000000706ce672 +DATA ·IEEEConst+2792(SB)/8, $0x0000000114f41f02 + +// x^81984 mod p(x), x^81920 mod p(x) +DATA ·IEEEConst+2800(SB)/8, $0x000000015d5070da +DATA ·IEEEConst+2808(SB)/8, $0x000000009c5f3586 + +// x^80960 mod p(x), x^80896 mod p(x) +DATA ·IEEEConst+2816(SB)/8, $0x0000000038f9493a +DATA ·IEEEConst+2824(SB)/8, $0x00000001878275fa + +// x^79936 mod p(x), x^79872 mod p(x) +DATA ·IEEEConst+2832(SB)/8, $0x00000000a3348a76 +DATA ·IEEEConst+2840(SB)/8, $0x00000000ddc42ce8 + +// x^78912 mod p(x), x^78848 mod p(x) +DATA ·IEEEConst+2848(SB)/8, $0x00000001ad0aab92 +DATA ·IEEEConst+2856(SB)/8, $0x0000000181d2c73a + +// x^77888 mod p(x), x^77824 mod p(x) +DATA ·IEEEConst+2864(SB)/8, $0x000000019e85f712 +DATA ·IEEEConst+2872(SB)/8, $0x0000000141c9320a + +// x^76864 mod p(x), x^76800 mod p(x) +DATA ·IEEEConst+2880(SB)/8, $0x000000005a871e76 +DATA ·IEEEConst+2888(SB)/8, $0x000000015235719a + +// x^75840 mod p(x), x^75776 mod p(x) +DATA ·IEEEConst+2896(SB)/8, $0x000000017249c662 +DATA ·IEEEConst+2904(SB)/8, $0x00000000be27d804 + +// x^74816 mod p(x), x^74752 mod p(x) +DATA ·IEEEConst+2912(SB)/8, $0x000000003a084712 +DATA ·IEEEConst+2920(SB)/8, $0x000000006242d45a + +// x^73792 mod p(x), x^73728 mod p(x) +DATA ·IEEEConst+2928(SB)/8, $0x00000000ed438478 +DATA ·IEEEConst+2936(SB)/8, $0x000000009a53638e + +// x^72768 mod p(x), x^72704 mod p(x) +DATA ·IEEEConst+2944(SB)/8, $0x00000000abac34cc +DATA ·IEEEConst+2952(SB)/8, $0x00000001001ecfb6 + +// x^71744 mod p(x), x^71680 mod p(x) +DATA ·IEEEConst+2960(SB)/8, $0x000000005f35ef3e +DATA ·IEEEConst+2968(SB)/8, $0x000000016d7c2d64 + +// x^70720 mod p(x), x^70656 mod p(x) +DATA ·IEEEConst+2976(SB)/8, $0x0000000047d6608c +DATA ·IEEEConst+2984(SB)/8, $0x00000001d0ce46c0 + +// x^69696 mod p(x), x^69632 mod p(x) +DATA ·IEEEConst+2992(SB)/8, $0x000000002d01470e +DATA ·IEEEConst+3000(SB)/8, $0x0000000124c907b4 + +// x^68672 mod p(x), x^68608 mod p(x) +DATA ·IEEEConst+3008(SB)/8, $0x0000000158bbc7b0 +DATA ·IEEEConst+3016(SB)/8, $0x0000000018a555ca + +// x^67648 mod p(x), x^67584 mod p(x) +DATA ·IEEEConst+3024(SB)/8, $0x00000000c0a23e8e +DATA ·IEEEConst+3032(SB)/8, $0x000000006b0980bc + +// x^66624 mod p(x), x^66560 mod p(x) +DATA ·IEEEConst+3040(SB)/8, $0x00000001ebd85c88 +DATA ·IEEEConst+3048(SB)/8, $0x000000008bbba964 + +// x^65600 mod p(x), x^65536 mod p(x) +DATA ·IEEEConst+3056(SB)/8, $0x000000019ee20bb2 +DATA ·IEEEConst+3064(SB)/8, $0x00000001070a5a1e + +// x^64576 mod p(x), x^64512 mod p(x) +DATA ·IEEEConst+3072(SB)/8, $0x00000001acabf2d6 +DATA ·IEEEConst+3080(SB)/8, $0x000000002204322a + +// x^63552 mod p(x), x^63488 mod p(x) +DATA ·IEEEConst+3088(SB)/8, $0x00000001b7963d56 +DATA ·IEEEConst+3096(SB)/8, $0x00000000a27524d0 + +// x^62528 mod p(x), x^62464 mod p(x) +DATA ·IEEEConst+3104(SB)/8, $0x000000017bffa1fe +DATA ·IEEEConst+3112(SB)/8, $0x0000000020b1e4ba + +// x^61504 mod p(x), x^61440 mod p(x) +DATA ·IEEEConst+3120(SB)/8, $0x000000001f15333e +DATA ·IEEEConst+3128(SB)/8, $0x0000000032cc27fc + +// x^60480 mod p(x), x^60416 mod p(x) +DATA ·IEEEConst+3136(SB)/8, $0x000000018593129e +DATA ·IEEEConst+3144(SB)/8, $0x0000000044dd22b8 + +// x^59456 mod p(x), x^59392 mod p(x) +DATA ·IEEEConst+3152(SB)/8, $0x000000019cb32602 +DATA ·IEEEConst+3160(SB)/8, $0x00000000dffc9e0a + +// x^58432 mod p(x), x^58368 mod p(x) +DATA ·IEEEConst+3168(SB)/8, $0x0000000142b05cc8 +DATA ·IEEEConst+3176(SB)/8, $0x00000001b7a0ed14 + +// x^57408 mod p(x), x^57344 mod p(x) +DATA ·IEEEConst+3184(SB)/8, $0x00000001be49e7a4 +DATA ·IEEEConst+3192(SB)/8, $0x00000000c7842488 + +// x^56384 mod p(x), x^56320 mod p(x) +DATA ·IEEEConst+3200(SB)/8, $0x0000000108f69d6c +DATA ·IEEEConst+3208(SB)/8, $0x00000001c02a4fee + +// x^55360 mod p(x), x^55296 mod p(x) +DATA ·IEEEConst+3216(SB)/8, $0x000000006c0971f0 +DATA ·IEEEConst+3224(SB)/8, $0x000000003c273778 + +// x^54336 mod p(x), x^54272 mod p(x) +DATA ·IEEEConst+3232(SB)/8, $0x000000005b16467a +DATA ·IEEEConst+3240(SB)/8, $0x00000001d63f8894 + +// x^53312 mod p(x), x^53248 mod p(x) +DATA ·IEEEConst+3248(SB)/8, $0x00000001551a628e +DATA ·IEEEConst+3256(SB)/8, $0x000000006be557d6 + +// x^52288 mod p(x), x^52224 mod p(x) +DATA ·IEEEConst+3264(SB)/8, $0x000000019e42ea92 +DATA ·IEEEConst+3272(SB)/8, $0x000000006a7806ea + +// x^51264 mod p(x), x^51200 mod p(x) +DATA ·IEEEConst+3280(SB)/8, $0x000000012fa83ff2 +DATA ·IEEEConst+3288(SB)/8, $0x000000016155aa0c + +// x^50240 mod p(x), x^50176 mod p(x) +DATA ·IEEEConst+3296(SB)/8, $0x000000011ca9cde0 +DATA ·IEEEConst+3304(SB)/8, $0x00000000908650ac + +// x^49216 mod p(x), x^49152 mod p(x) +DATA ·IEEEConst+3312(SB)/8, $0x00000000c8e5cd74 +DATA ·IEEEConst+3320(SB)/8, $0x00000000aa5a8084 + +// x^48192 mod p(x), x^48128 mod p(x) +DATA ·IEEEConst+3328(SB)/8, $0x0000000096c27f0c +DATA ·IEEEConst+3336(SB)/8, $0x0000000191bb500a + +// x^47168 mod p(x), x^47104 mod p(x) +DATA ·IEEEConst+3344(SB)/8, $0x000000002baed926 +DATA ·IEEEConst+3352(SB)/8, $0x0000000064e9bed0 + +// x^46144 mod p(x), x^46080 mod p(x) +DATA ·IEEEConst+3360(SB)/8, $0x000000017c8de8d2 +DATA ·IEEEConst+3368(SB)/8, $0x000000009444f302 + +// x^45120 mod p(x), x^45056 mod p(x) +DATA ·IEEEConst+3376(SB)/8, $0x00000000d43d6068 +DATA ·IEEEConst+3384(SB)/8, $0x000000019db07d3c + +// x^44096 mod p(x), x^44032 mod p(x) +DATA ·IEEEConst+3392(SB)/8, $0x00000000cb2c4b26 +DATA ·IEEEConst+3400(SB)/8, $0x00000001359e3e6e + +// x^43072 mod p(x), x^43008 mod p(x) +DATA ·IEEEConst+3408(SB)/8, $0x0000000145b8da26 +DATA ·IEEEConst+3416(SB)/8, $0x00000001e4f10dd2 + +// x^42048 mod p(x), x^41984 mod p(x) +DATA ·IEEEConst+3424(SB)/8, $0x000000018fff4b08 +DATA ·IEEEConst+3432(SB)/8, $0x0000000124f5735e + +// x^41024 mod p(x), x^40960 mod p(x) +DATA ·IEEEConst+3440(SB)/8, $0x0000000150b58ed0 +DATA ·IEEEConst+3448(SB)/8, $0x0000000124760a4c + +// x^40000 mod p(x), x^39936 mod p(x) +DATA ·IEEEConst+3456(SB)/8, $0x00000001549f39bc +DATA ·IEEEConst+3464(SB)/8, $0x000000000f1fc186 + +// x^38976 mod p(x), x^38912 mod p(x) +DATA ·IEEEConst+3472(SB)/8, $0x00000000ef4d2f42 +DATA ·IEEEConst+3480(SB)/8, $0x00000000150e4cc4 + +// x^37952 mod p(x), x^37888 mod p(x) +DATA ·IEEEConst+3488(SB)/8, $0x00000001b1468572 +DATA ·IEEEConst+3496(SB)/8, $0x000000002a6204e8 + +// x^36928 mod p(x), x^36864 mod p(x) +DATA ·IEEEConst+3504(SB)/8, $0x000000013d7403b2 +DATA ·IEEEConst+3512(SB)/8, $0x00000000beb1d432 + +// x^35904 mod p(x), x^35840 mod p(x) +DATA ·IEEEConst+3520(SB)/8, $0x00000001a4681842 +DATA ·IEEEConst+3528(SB)/8, $0x0000000135f3f1f0 + +// x^34880 mod p(x), x^34816 mod p(x) +DATA ·IEEEConst+3536(SB)/8, $0x0000000167714492 +DATA ·IEEEConst+3544(SB)/8, $0x0000000074fe2232 + +// x^33856 mod p(x), x^33792 mod p(x) +DATA ·IEEEConst+3552(SB)/8, $0x00000001e599099a +DATA ·IEEEConst+3560(SB)/8, $0x000000001ac6e2ba + +// x^32832 mod p(x), x^32768 mod p(x) +DATA ·IEEEConst+3568(SB)/8, $0x00000000fe128194 +DATA ·IEEEConst+3576(SB)/8, $0x0000000013fca91e + +// x^31808 mod p(x), x^31744 mod p(x) +DATA ·IEEEConst+3584(SB)/8, $0x0000000077e8b990 +DATA ·IEEEConst+3592(SB)/8, $0x0000000183f4931e + +// x^30784 mod p(x), x^30720 mod p(x) +DATA ·IEEEConst+3600(SB)/8, $0x00000001a267f63a +DATA ·IEEEConst+3608(SB)/8, $0x00000000b6d9b4e4 + +// x^29760 mod p(x), x^29696 mod p(x) +DATA ·IEEEConst+3616(SB)/8, $0x00000001945c245a +DATA ·IEEEConst+3624(SB)/8, $0x00000000b5188656 + +// x^28736 mod p(x), x^28672 mod p(x) +DATA ·IEEEConst+3632(SB)/8, $0x0000000149002e76 +DATA ·IEEEConst+3640(SB)/8, $0x0000000027a81a84 + +// x^27712 mod p(x), x^27648 mod p(x) +DATA ·IEEEConst+3648(SB)/8, $0x00000001bb8310a4 +DATA ·IEEEConst+3656(SB)/8, $0x0000000125699258 + +// x^26688 mod p(x), x^26624 mod p(x) +DATA ·IEEEConst+3664(SB)/8, $0x000000019ec60bcc +DATA ·IEEEConst+3672(SB)/8, $0x00000001b23de796 + +// x^25664 mod p(x), x^25600 mod p(x) +DATA ·IEEEConst+3680(SB)/8, $0x000000012d8590ae +DATA ·IEEEConst+3688(SB)/8, $0x00000000fe4365dc + +// x^24640 mod p(x), x^24576 mod p(x) +DATA ·IEEEConst+3696(SB)/8, $0x0000000065b00684 +DATA ·IEEEConst+3704(SB)/8, $0x00000000c68f497a + +// x^23616 mod p(x), x^23552 mod p(x) +DATA ·IEEEConst+3712(SB)/8, $0x000000015e5aeadc +DATA ·IEEEConst+3720(SB)/8, $0x00000000fbf521ee + +// x^22592 mod p(x), x^22528 mod p(x) +DATA ·IEEEConst+3728(SB)/8, $0x00000000b77ff2b0 +DATA ·IEEEConst+3736(SB)/8, $0x000000015eac3378 + +// x^21568 mod p(x), x^21504 mod p(x) +DATA ·IEEEConst+3744(SB)/8, $0x0000000188da2ff6 +DATA ·IEEEConst+3752(SB)/8, $0x0000000134914b90 + +// x^20544 mod p(x), x^20480 mod p(x) +DATA ·IEEEConst+3760(SB)/8, $0x0000000063da929a +DATA ·IEEEConst+3768(SB)/8, $0x0000000016335cfe + +// x^19520 mod p(x), x^19456 mod p(x) +DATA ·IEEEConst+3776(SB)/8, $0x00000001389caa80 +DATA ·IEEEConst+3784(SB)/8, $0x000000010372d10c + +// x^18496 mod p(x), x^18432 mod p(x) +DATA ·IEEEConst+3792(SB)/8, $0x000000013db599d2 +DATA ·IEEEConst+3800(SB)/8, $0x000000015097b908 + +// x^17472 mod p(x), x^17408 mod p(x) +DATA ·IEEEConst+3808(SB)/8, $0x0000000122505a86 +DATA ·IEEEConst+3816(SB)/8, $0x00000001227a7572 + +// x^16448 mod p(x), x^16384 mod p(x) +DATA ·IEEEConst+3824(SB)/8, $0x000000016bd72746 +DATA ·IEEEConst+3832(SB)/8, $0x000000009a8f75c0 + +// x^15424 mod p(x), x^15360 mod p(x) +DATA ·IEEEConst+3840(SB)/8, $0x00000001c3faf1d4 +DATA ·IEEEConst+3848(SB)/8, $0x00000000682c77a2 + +// x^14400 mod p(x), x^14336 mod p(x) +DATA ·IEEEConst+3856(SB)/8, $0x00000001111c826c +DATA ·IEEEConst+3864(SB)/8, $0x00000000231f091c + +// x^13376 mod p(x), x^13312 mod p(x) +DATA ·IEEEConst+3872(SB)/8, $0x00000000153e9fb2 +DATA ·IEEEConst+3880(SB)/8, $0x000000007d4439f2 + +// x^12352 mod p(x), x^12288 mod p(x) +DATA ·IEEEConst+3888(SB)/8, $0x000000002b1f7b60 +DATA ·IEEEConst+3896(SB)/8, $0x000000017e221efc + +// x^11328 mod p(x), x^11264 mod p(x) +DATA ·IEEEConst+3904(SB)/8, $0x00000000b1dba570 +DATA ·IEEEConst+3912(SB)/8, $0x0000000167457c38 + +// x^10304 mod p(x), x^10240 mod p(x) +DATA ·IEEEConst+3920(SB)/8, $0x00000001f6397b76 +DATA ·IEEEConst+3928(SB)/8, $0x00000000bdf081c4 + +// x^9280 mod p(x), x^9216 mod p(x) +DATA ·IEEEConst+3936(SB)/8, $0x0000000156335214 +DATA ·IEEEConst+3944(SB)/8, $0x000000016286d6b0 + +// x^8256 mod p(x), x^8192 mod p(x) +DATA ·IEEEConst+3952(SB)/8, $0x00000001d70e3986 +DATA ·IEEEConst+3960(SB)/8, $0x00000000c84f001c + +// x^7232 mod p(x), x^7168 mod p(x) +DATA ·IEEEConst+3968(SB)/8, $0x000000003701a774 +DATA ·IEEEConst+3976(SB)/8, $0x0000000064efe7c0 + +// x^6208 mod p(x), x^6144 mod p(x) +DATA ·IEEEConst+3984(SB)/8, $0x00000000ac81ef72 +DATA ·IEEEConst+3992(SB)/8, $0x000000000ac2d904 + +// x^5184 mod p(x), x^5120 mod p(x) +DATA ·IEEEConst+4000(SB)/8, $0x0000000133212464 +DATA ·IEEEConst+4008(SB)/8, $0x00000000fd226d14 + +// x^4160 mod p(x), x^4096 mod p(x) +DATA ·IEEEConst+4016(SB)/8, $0x00000000e4e45610 +DATA ·IEEEConst+4024(SB)/8, $0x000000011cfd42e0 + +// x^3136 mod p(x), x^3072 mod p(x) +DATA ·IEEEConst+4032(SB)/8, $0x000000000c1bd370 +DATA ·IEEEConst+4040(SB)/8, $0x000000016e5a5678 + +// x^2112 mod p(x), x^2048 mod p(x) +DATA ·IEEEConst+4048(SB)/8, $0x00000001a7b9e7a6 +DATA ·IEEEConst+4056(SB)/8, $0x00000001d888fe22 + +// x^1088 mod p(x), x^1024 mod p(x) +DATA ·IEEEConst+4064(SB)/8, $0x000000007d657a10 +DATA ·IEEEConst+4072(SB)/8, $0x00000001af77fcd4 + +// x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) +DATA ·IEEEConst+4080(SB)/8, $0x99168a18ec447f11 +DATA ·IEEEConst+4088(SB)/8, $0xed837b2613e8221e + +// x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) +DATA ·IEEEConst+4096(SB)/8, $0xe23e954e8fd2cd3c +DATA ·IEEEConst+4104(SB)/8, $0xc8acdd8147b9ce5a + +// x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) +DATA ·IEEEConst+4112(SB)/8, $0x92f8befe6b1d2b53 +DATA ·IEEEConst+4120(SB)/8, $0xd9ad6d87d4277e25 + +// x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) +DATA ·IEEEConst+4128(SB)/8, $0xf38a3556291ea462 +DATA ·IEEEConst+4136(SB)/8, $0xc10ec5e033fbca3b + +// x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) +DATA ·IEEEConst+4144(SB)/8, $0x974ac56262b6ca4b +DATA ·IEEEConst+4152(SB)/8, $0xc0b55b0e82e02e2f + +// x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) +DATA ·IEEEConst+4160(SB)/8, $0x855712b3784d2a56 +DATA ·IEEEConst+4168(SB)/8, $0x71aa1df0e172334d + +// x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) +DATA ·IEEEConst+4176(SB)/8, $0xa5abe9f80eaee722 +DATA ·IEEEConst+4184(SB)/8, $0xfee3053e3969324d + +// x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) +DATA ·IEEEConst+4192(SB)/8, $0x1fa0943ddb54814c +DATA ·IEEEConst+4200(SB)/8, $0xf44779b93eb2bd08 + +// x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) +DATA ·IEEEConst+4208(SB)/8, $0xa53ff440d7bbfe6a +DATA ·IEEEConst+4216(SB)/8, $0xf5449b3f00cc3374 + +// x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) +DATA ·IEEEConst+4224(SB)/8, $0xebe7e3566325605c +DATA ·IEEEConst+4232(SB)/8, $0x6f8346e1d777606e + +// x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) +DATA ·IEEEConst+4240(SB)/8, $0xc65a272ce5b592b8 +DATA ·IEEEConst+4248(SB)/8, $0xe3ab4f2ac0b95347 + +// x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) +DATA ·IEEEConst+4256(SB)/8, $0x5705a9ca4721589f +DATA ·IEEEConst+4264(SB)/8, $0xaa2215ea329ecc11 + +// x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) +DATA ·IEEEConst+4272(SB)/8, $0xe3720acb88d14467 +DATA ·IEEEConst+4280(SB)/8, $0x1ed8f66ed95efd26 + +// x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) +DATA ·IEEEConst+4288(SB)/8, $0xba1aca0315141c31 +DATA ·IEEEConst+4296(SB)/8, $0x78ed02d5a700e96a + +// x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) +DATA ·IEEEConst+4304(SB)/8, $0xad2a31b3ed627dae +DATA ·IEEEConst+4312(SB)/8, $0xba8ccbe832b39da3 + +// x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) +DATA ·IEEEConst+4320(SB)/8, $0x6655004fa06a2517 +DATA ·IEEEConst+4328(SB)/8, $0xedb88320b1e6b092 + +GLOBL ·IEEEConst(SB), RODATA, $4336 + +// Barrett constant m - (4^32)/n +DATA ·IEEEBarConst(SB)/8, $0x00000001f7011641 +DATA ·IEEEBarConst+8(SB)/8, $0x0000000000000000 +DATA ·IEEEBarConst+16(SB)/8, $0x00000001db710641 +DATA ·IEEEBarConst+24(SB)/8, $0x0000000000000000 +GLOBL ·IEEEBarConst(SB), RODATA, $32 + +// Reduce 262144 kbits to 1024 bits +// x^261184 mod p(x), x^261120 mod p(x) +DATA ·CastConst+0(SB)/8, $0x000000009c37c408 +DATA ·CastConst+8(SB)/8, $0x00000000b6ca9e20 + +// x^260160 mod p(x), x^260096 mod p(x) +DATA ·CastConst+16(SB)/8, $0x00000001b51df26c +DATA ·CastConst+24(SB)/8, $0x00000000350249a8 + +// x^259136 mod p(x), x^259072 mod p(x) +DATA ·CastConst+32(SB)/8, $0x000000000724b9d0 +DATA ·CastConst+40(SB)/8, $0x00000001862dac54 + +// x^258112 mod p(x), x^258048 mod p(x) +DATA ·CastConst+48(SB)/8, $0x00000001c00532fe +DATA ·CastConst+56(SB)/8, $0x00000001d87fb48c + +// x^257088 mod p(x), x^257024 mod p(x) +DATA ·CastConst+64(SB)/8, $0x00000000f05a9362 +DATA ·CastConst+72(SB)/8, $0x00000001f39b699e + +// x^256064 mod p(x), x^256000 mod p(x) +DATA ·CastConst+80(SB)/8, $0x00000001e1007970 +DATA ·CastConst+88(SB)/8, $0x0000000101da11b4 + +// x^255040 mod p(x), x^254976 mod p(x) +DATA ·CastConst+96(SB)/8, $0x00000000a57366ee +DATA ·CastConst+104(SB)/8, $0x00000001cab571e0 + +// x^254016 mod p(x), x^253952 mod p(x) +DATA ·CastConst+112(SB)/8, $0x0000000192011284 +DATA ·CastConst+120(SB)/8, $0x00000000c7020cfe + +// x^252992 mod p(x), x^252928 mod p(x) +DATA ·CastConst+128(SB)/8, $0x0000000162716d9a +DATA ·CastConst+136(SB)/8, $0x00000000cdaed1ae + +// x^251968 mod p(x), x^251904 mod p(x) +DATA ·CastConst+144(SB)/8, $0x00000000cd97ecde +DATA ·CastConst+152(SB)/8, $0x00000001e804effc + +// x^250944 mod p(x), x^250880 mod p(x) +DATA ·CastConst+160(SB)/8, $0x0000000058812bc0 +DATA ·CastConst+168(SB)/8, $0x0000000077c3ea3a + +// x^249920 mod p(x), x^249856 mod p(x) +DATA ·CastConst+176(SB)/8, $0x0000000088b8c12e +DATA ·CastConst+184(SB)/8, $0x0000000068df31b4 + +// x^248896 mod p(x), x^248832 mod p(x) +DATA ·CastConst+192(SB)/8, $0x00000001230b234c +DATA ·CastConst+200(SB)/8, $0x00000000b059b6c2 + +// x^247872 mod p(x), x^247808 mod p(x) +DATA ·CastConst+208(SB)/8, $0x00000001120b416e +DATA ·CastConst+216(SB)/8, $0x0000000145fb8ed8 + +// x^246848 mod p(x), x^246784 mod p(x) +DATA ·CastConst+224(SB)/8, $0x00000001974aecb0 +DATA ·CastConst+232(SB)/8, $0x00000000cbc09168 + +// x^245824 mod p(x), x^245760 mod p(x) +DATA ·CastConst+240(SB)/8, $0x000000008ee3f226 +DATA ·CastConst+248(SB)/8, $0x000000005ceeedc2 + +// x^244800 mod p(x), x^244736 mod p(x) +DATA ·CastConst+256(SB)/8, $0x00000001089aba9a +DATA ·CastConst+264(SB)/8, $0x0000000047d74e86 + +// x^243776 mod p(x), x^243712 mod p(x) +DATA ·CastConst+272(SB)/8, $0x0000000065113872 +DATA ·CastConst+280(SB)/8, $0x00000001407e9e22 + +// x^242752 mod p(x), x^242688 mod p(x) +DATA ·CastConst+288(SB)/8, $0x000000005c07ec10 +DATA ·CastConst+296(SB)/8, $0x00000001da967bda + +// x^241728 mod p(x), x^241664 mod p(x) +DATA ·CastConst+304(SB)/8, $0x0000000187590924 +DATA ·CastConst+312(SB)/8, $0x000000006c898368 + +// x^240704 mod p(x), x^240640 mod p(x) +DATA ·CastConst+320(SB)/8, $0x00000000e35da7c6 +DATA ·CastConst+328(SB)/8, $0x00000000f2d14c98 + +// x^239680 mod p(x), x^239616 mod p(x) +DATA ·CastConst+336(SB)/8, $0x000000000415855a +DATA ·CastConst+344(SB)/8, $0x00000001993c6ad4 + +// x^238656 mod p(x), x^238592 mod p(x) +DATA ·CastConst+352(SB)/8, $0x0000000073617758 +DATA ·CastConst+360(SB)/8, $0x000000014683d1ac + +// x^237632 mod p(x), x^237568 mod p(x) +DATA ·CastConst+368(SB)/8, $0x0000000176021d28 +DATA ·CastConst+376(SB)/8, $0x00000001a7c93e6c + +// x^236608 mod p(x), x^236544 mod p(x) +DATA ·CastConst+384(SB)/8, $0x00000001c358fd0a +DATA ·CastConst+392(SB)/8, $0x000000010211e90a + +// x^235584 mod p(x), x^235520 mod p(x) +DATA ·CastConst+400(SB)/8, $0x00000001ff7a2c18 +DATA ·CastConst+408(SB)/8, $0x000000001119403e + +// x^234560 mod p(x), x^234496 mod p(x) +DATA ·CastConst+416(SB)/8, $0x00000000f2d9f7e4 +DATA ·CastConst+424(SB)/8, $0x000000001c3261aa + +// x^233536 mod p(x), x^233472 mod p(x) +DATA ·CastConst+432(SB)/8, $0x000000016cf1f9c8 +DATA ·CastConst+440(SB)/8, $0x000000014e37a634 + +// x^232512 mod p(x), x^232448 mod p(x) +DATA ·CastConst+448(SB)/8, $0x000000010af9279a +DATA ·CastConst+456(SB)/8, $0x0000000073786c0c + +// x^231488 mod p(x), x^231424 mod p(x) +DATA ·CastConst+464(SB)/8, $0x0000000004f101e8 +DATA ·CastConst+472(SB)/8, $0x000000011dc037f8 + +// x^230464 mod p(x), x^230400 mod p(x) +DATA ·CastConst+480(SB)/8, $0x0000000070bcf184 +DATA ·CastConst+488(SB)/8, $0x0000000031433dfc + +// x^229440 mod p(x), x^229376 mod p(x) +DATA ·CastConst+496(SB)/8, $0x000000000a8de642 +DATA ·CastConst+504(SB)/8, $0x000000009cde8348 + +// x^228416 mod p(x), x^228352 mod p(x) +DATA ·CastConst+512(SB)/8, $0x0000000062ea130c +DATA ·CastConst+520(SB)/8, $0x0000000038d3c2a6 + +// x^227392 mod p(x), x^227328 mod p(x) +DATA ·CastConst+528(SB)/8, $0x00000001eb31cbb2 +DATA ·CastConst+536(SB)/8, $0x000000011b25f260 + +// x^226368 mod p(x), x^226304 mod p(x) +DATA ·CastConst+544(SB)/8, $0x0000000170783448 +DATA ·CastConst+552(SB)/8, $0x000000001629e6f0 + +// x^225344 mod p(x), x^225280 mod p(x) +DATA ·CastConst+560(SB)/8, $0x00000001a684b4c6 +DATA ·CastConst+568(SB)/8, $0x0000000160838b4c + +// x^224320 mod p(x), x^224256 mod p(x) +DATA ·CastConst+576(SB)/8, $0x00000000253ca5b4 +DATA ·CastConst+584(SB)/8, $0x000000007a44011c + +// x^223296 mod p(x), x^223232 mod p(x) +DATA ·CastConst+592(SB)/8, $0x0000000057b4b1e2 +DATA ·CastConst+600(SB)/8, $0x00000000226f417a + +// x^222272 mod p(x), x^222208 mod p(x) +DATA ·CastConst+608(SB)/8, $0x00000000b6bd084c +DATA ·CastConst+616(SB)/8, $0x0000000045eb2eb4 + +// x^221248 mod p(x), x^221184 mod p(x) +DATA ·CastConst+624(SB)/8, $0x0000000123c2d592 +DATA ·CastConst+632(SB)/8, $0x000000014459d70c + +// x^220224 mod p(x), x^220160 mod p(x) +DATA ·CastConst+640(SB)/8, $0x00000000159dafce +DATA ·CastConst+648(SB)/8, $0x00000001d406ed82 + +// x^219200 mod p(x), x^219136 mod p(x) +DATA ·CastConst+656(SB)/8, $0x0000000127e1a64e +DATA ·CastConst+664(SB)/8, $0x0000000160c8e1a8 + +// x^218176 mod p(x), x^218112 mod p(x) +DATA ·CastConst+672(SB)/8, $0x0000000056860754 +DATA ·CastConst+680(SB)/8, $0x0000000027ba8098 + +// x^217152 mod p(x), x^217088 mod p(x) +DATA ·CastConst+688(SB)/8, $0x00000001e661aae8 +DATA ·CastConst+696(SB)/8, $0x000000006d92d018 + +// x^216128 mod p(x), x^216064 mod p(x) +DATA ·CastConst+704(SB)/8, $0x00000000f82c6166 +DATA ·CastConst+712(SB)/8, $0x000000012ed7e3f2 + +// x^215104 mod p(x), x^215040 mod p(x) +DATA ·CastConst+720(SB)/8, $0x00000000c4f9c7ae +DATA ·CastConst+728(SB)/8, $0x000000002dc87788 + +// x^214080 mod p(x), x^214016 mod p(x) +DATA ·CastConst+736(SB)/8, $0x0000000074203d20 +DATA ·CastConst+744(SB)/8, $0x0000000018240bb8 + +// x^213056 mod p(x), x^212992 mod p(x) +DATA ·CastConst+752(SB)/8, $0x0000000198173052 +DATA ·CastConst+760(SB)/8, $0x000000001ad38158 + +// x^212032 mod p(x), x^211968 mod p(x) +DATA ·CastConst+768(SB)/8, $0x00000001ce8aba54 +DATA ·CastConst+776(SB)/8, $0x00000001396b78f2 + +// x^211008 mod p(x), x^210944 mod p(x) +DATA ·CastConst+784(SB)/8, $0x00000001850d5d94 +DATA ·CastConst+792(SB)/8, $0x000000011a681334 + +// x^209984 mod p(x), x^209920 mod p(x) +DATA ·CastConst+800(SB)/8, $0x00000001d609239c +DATA ·CastConst+808(SB)/8, $0x000000012104732e + +// x^208960 mod p(x), x^208896 mod p(x) +DATA ·CastConst+816(SB)/8, $0x000000001595f048 +DATA ·CastConst+824(SB)/8, $0x00000000a140d90c + +// x^207936 mod p(x), x^207872 mod p(x) +DATA ·CastConst+832(SB)/8, $0x0000000042ccee08 +DATA ·CastConst+840(SB)/8, $0x00000001b7215eda + +// x^206912 mod p(x), x^206848 mod p(x) +DATA ·CastConst+848(SB)/8, $0x000000010a389d74 +DATA ·CastConst+856(SB)/8, $0x00000001aaf1df3c + +// x^205888 mod p(x), x^205824 mod p(x) +DATA ·CastConst+864(SB)/8, $0x000000012a840da6 +DATA ·CastConst+872(SB)/8, $0x0000000029d15b8a + +// x^204864 mod p(x), x^204800 mod p(x) +DATA ·CastConst+880(SB)/8, $0x000000001d181c0c +DATA ·CastConst+888(SB)/8, $0x00000000f1a96922 + +// x^203840 mod p(x), x^203776 mod p(x) +DATA ·CastConst+896(SB)/8, $0x0000000068b7d1f6 +DATA ·CastConst+904(SB)/8, $0x00000001ac80d03c + +// x^202816 mod p(x), x^202752 mod p(x) +DATA ·CastConst+912(SB)/8, $0x000000005b0f14fc +DATA ·CastConst+920(SB)/8, $0x000000000f11d56a + +// x^201792 mod p(x), x^201728 mod p(x) +DATA ·CastConst+928(SB)/8, $0x0000000179e9e730 +DATA ·CastConst+936(SB)/8, $0x00000001f1c022a2 + +// x^200768 mod p(x), x^200704 mod p(x) +DATA ·CastConst+944(SB)/8, $0x00000001ce1368d6 +DATA ·CastConst+952(SB)/8, $0x0000000173d00ae2 + +// x^199744 mod p(x), x^199680 mod p(x) +DATA ·CastConst+960(SB)/8, $0x0000000112c3a84c +DATA ·CastConst+968(SB)/8, $0x00000001d4ffe4ac + +// x^198720 mod p(x), x^198656 mod p(x) +DATA ·CastConst+976(SB)/8, $0x00000000de940fee +DATA ·CastConst+984(SB)/8, $0x000000016edc5ae4 + +// x^197696 mod p(x), x^197632 mod p(x) +DATA ·CastConst+992(SB)/8, $0x00000000fe896b7e +DATA ·CastConst+1000(SB)/8, $0x00000001f1a02140 + +// x^196672 mod p(x), x^196608 mod p(x) +DATA ·CastConst+1008(SB)/8, $0x00000001f797431c +DATA ·CastConst+1016(SB)/8, $0x00000000ca0b28a0 + +// x^195648 mod p(x), x^195584 mod p(x) +DATA ·CastConst+1024(SB)/8, $0x0000000053e989ba +DATA ·CastConst+1032(SB)/8, $0x00000001928e30a2 + +// x^194624 mod p(x), x^194560 mod p(x) +DATA ·CastConst+1040(SB)/8, $0x000000003920cd16 +DATA ·CastConst+1048(SB)/8, $0x0000000097b1b002 + +// x^193600 mod p(x), x^193536 mod p(x) +DATA ·CastConst+1056(SB)/8, $0x00000001e6f579b8 +DATA ·CastConst+1064(SB)/8, $0x00000000b15bf906 + +// x^192576 mod p(x), x^192512 mod p(x) +DATA ·CastConst+1072(SB)/8, $0x000000007493cb0a +DATA ·CastConst+1080(SB)/8, $0x00000000411c5d52 + +// x^191552 mod p(x), x^191488 mod p(x) +DATA ·CastConst+1088(SB)/8, $0x00000001bdd376d8 +DATA ·CastConst+1096(SB)/8, $0x00000001c36f3300 + +// x^190528 mod p(x), x^190464 mod p(x) +DATA ·CastConst+1104(SB)/8, $0x000000016badfee6 +DATA ·CastConst+1112(SB)/8, $0x00000001119227e0 + +// x^189504 mod p(x), x^189440 mod p(x) +DATA ·CastConst+1120(SB)/8, $0x0000000071de5c58 +DATA ·CastConst+1128(SB)/8, $0x00000000114d4702 + +// x^188480 mod p(x), x^188416 mod p(x) +DATA ·CastConst+1136(SB)/8, $0x00000000453f317c +DATA ·CastConst+1144(SB)/8, $0x00000000458b5b98 + +// x^187456 mod p(x), x^187392 mod p(x) +DATA ·CastConst+1152(SB)/8, $0x0000000121675cce +DATA ·CastConst+1160(SB)/8, $0x000000012e31fb8e + +// x^186432 mod p(x), x^186368 mod p(x) +DATA ·CastConst+1168(SB)/8, $0x00000001f409ee92 +DATA ·CastConst+1176(SB)/8, $0x000000005cf619d8 + +// x^185408 mod p(x), x^185344 mod p(x) +DATA ·CastConst+1184(SB)/8, $0x00000000f36b9c88 +DATA ·CastConst+1192(SB)/8, $0x0000000063f4d8b2 + +// x^184384 mod p(x), x^184320 mod p(x) +DATA ·CastConst+1200(SB)/8, $0x0000000036b398f4 +DATA ·CastConst+1208(SB)/8, $0x000000004138dc8a + +// x^183360 mod p(x), x^183296 mod p(x) +DATA ·CastConst+1216(SB)/8, $0x00000001748f9adc +DATA ·CastConst+1224(SB)/8, $0x00000001d29ee8e0 + +// x^182336 mod p(x), x^182272 mod p(x) +DATA ·CastConst+1232(SB)/8, $0x00000001be94ec00 +DATA ·CastConst+1240(SB)/8, $0x000000006a08ace8 + +// x^181312 mod p(x), x^181248 mod p(x) +DATA ·CastConst+1248(SB)/8, $0x00000000b74370d6 +DATA ·CastConst+1256(SB)/8, $0x0000000127d42010 + +// x^180288 mod p(x), x^180224 mod p(x) +DATA ·CastConst+1264(SB)/8, $0x00000001174d0b98 +DATA ·CastConst+1272(SB)/8, $0x0000000019d76b62 + +// x^179264 mod p(x), x^179200 mod p(x) +DATA ·CastConst+1280(SB)/8, $0x00000000befc06a4 +DATA ·CastConst+1288(SB)/8, $0x00000001b1471f6e + +// x^178240 mod p(x), x^178176 mod p(x) +DATA ·CastConst+1296(SB)/8, $0x00000001ae125288 +DATA ·CastConst+1304(SB)/8, $0x00000001f64c19cc + +// x^177216 mod p(x), x^177152 mod p(x) +DATA ·CastConst+1312(SB)/8, $0x0000000095c19b34 +DATA ·CastConst+1320(SB)/8, $0x00000000003c0ea0 + +// x^176192 mod p(x), x^176128 mod p(x) +DATA ·CastConst+1328(SB)/8, $0x00000001a78496f2 +DATA ·CastConst+1336(SB)/8, $0x000000014d73abf6 + +// x^175168 mod p(x), x^175104 mod p(x) +DATA ·CastConst+1344(SB)/8, $0x00000001ac5390a0 +DATA ·CastConst+1352(SB)/8, $0x00000001620eb844 + +// x^174144 mod p(x), x^174080 mod p(x) +DATA ·CastConst+1360(SB)/8, $0x000000002a80ed6e +DATA ·CastConst+1368(SB)/8, $0x0000000147655048 + +// x^173120 mod p(x), x^173056 mod p(x) +DATA ·CastConst+1376(SB)/8, $0x00000001fa9b0128 +DATA ·CastConst+1384(SB)/8, $0x0000000067b5077e + +// x^172096 mod p(x), x^172032 mod p(x) +DATA ·CastConst+1392(SB)/8, $0x00000001ea94929e +DATA ·CastConst+1400(SB)/8, $0x0000000010ffe206 + +// x^171072 mod p(x), x^171008 mod p(x) +DATA ·CastConst+1408(SB)/8, $0x0000000125f4305c +DATA ·CastConst+1416(SB)/8, $0x000000000fee8f1e + +// x^170048 mod p(x), x^169984 mod p(x) +DATA ·CastConst+1424(SB)/8, $0x00000001471e2002 +DATA ·CastConst+1432(SB)/8, $0x00000001da26fbae + +// x^169024 mod p(x), x^168960 mod p(x) +DATA ·CastConst+1440(SB)/8, $0x0000000132d2253a +DATA ·CastConst+1448(SB)/8, $0x00000001b3a8bd88 + +// x^168000 mod p(x), x^167936 mod p(x) +DATA ·CastConst+1456(SB)/8, $0x00000000f26b3592 +DATA ·CastConst+1464(SB)/8, $0x00000000e8f3898e + +// x^166976 mod p(x), x^166912 mod p(x) +DATA ·CastConst+1472(SB)/8, $0x00000000bc8b67b0 +DATA ·CastConst+1480(SB)/8, $0x00000000b0d0d28c + +// x^165952 mod p(x), x^165888 mod p(x) +DATA ·CastConst+1488(SB)/8, $0x000000013a826ef2 +DATA ·CastConst+1496(SB)/8, $0x0000000030f2a798 + +// x^164928 mod p(x), x^164864 mod p(x) +DATA ·CastConst+1504(SB)/8, $0x0000000081482c84 +DATA ·CastConst+1512(SB)/8, $0x000000000fba1002 + +// x^163904 mod p(x), x^163840 mod p(x) +DATA ·CastConst+1520(SB)/8, $0x00000000e77307c2 +DATA ·CastConst+1528(SB)/8, $0x00000000bdb9bd72 + +// x^162880 mod p(x), x^162816 mod p(x) +DATA ·CastConst+1536(SB)/8, $0x00000000d4a07ec8 +DATA ·CastConst+1544(SB)/8, $0x0000000075d3bf5a + +// x^161856 mod p(x), x^161792 mod p(x) +DATA ·CastConst+1552(SB)/8, $0x0000000017102100 +DATA ·CastConst+1560(SB)/8, $0x00000000ef1f98a0 + +// x^160832 mod p(x), x^160768 mod p(x) +DATA ·CastConst+1568(SB)/8, $0x00000000db406486 +DATA ·CastConst+1576(SB)/8, $0x00000000689c7602 + +// x^159808 mod p(x), x^159744 mod p(x) +DATA ·CastConst+1584(SB)/8, $0x0000000192db7f88 +DATA ·CastConst+1592(SB)/8, $0x000000016d5fa5fe + +// x^158784 mod p(x), x^158720 mod p(x) +DATA ·CastConst+1600(SB)/8, $0x000000018bf67b1e +DATA ·CastConst+1608(SB)/8, $0x00000001d0d2b9ca + +// x^157760 mod p(x), x^157696 mod p(x) +DATA ·CastConst+1616(SB)/8, $0x000000007c09163e +DATA ·CastConst+1624(SB)/8, $0x0000000041e7b470 + +// x^156736 mod p(x), x^156672 mod p(x) +DATA ·CastConst+1632(SB)/8, $0x000000000adac060 +DATA ·CastConst+1640(SB)/8, $0x00000001cbb6495e + +// x^155712 mod p(x), x^155648 mod p(x) +DATA ·CastConst+1648(SB)/8, $0x00000000bd8316ae +DATA ·CastConst+1656(SB)/8, $0x000000010052a0b0 + +// x^154688 mod p(x), x^154624 mod p(x) +DATA ·CastConst+1664(SB)/8, $0x000000019f09ab54 +DATA ·CastConst+1672(SB)/8, $0x00000001d8effb5c + +// x^153664 mod p(x), x^153600 mod p(x) +DATA ·CastConst+1680(SB)/8, $0x0000000125155542 +DATA ·CastConst+1688(SB)/8, $0x00000001d969853c + +// x^152640 mod p(x), x^152576 mod p(x) +DATA ·CastConst+1696(SB)/8, $0x000000018fdb5882 +DATA ·CastConst+1704(SB)/8, $0x00000000523ccce2 + +// x^151616 mod p(x), x^151552 mod p(x) +DATA ·CastConst+1712(SB)/8, $0x00000000e794b3f4 +DATA ·CastConst+1720(SB)/8, $0x000000001e2436bc + +// x^150592 mod p(x), x^150528 mod p(x) +DATA ·CastConst+1728(SB)/8, $0x000000016f9bb022 +DATA ·CastConst+1736(SB)/8, $0x00000000ddd1c3a2 + +// x^149568 mod p(x), x^149504 mod p(x) +DATA ·CastConst+1744(SB)/8, $0x00000000290c9978 +DATA ·CastConst+1752(SB)/8, $0x0000000019fcfe38 + +// x^148544 mod p(x), x^148480 mod p(x) +DATA ·CastConst+1760(SB)/8, $0x0000000083c0f350 +DATA ·CastConst+1768(SB)/8, $0x00000001ce95db64 + +// x^147520 mod p(x), x^147456 mod p(x) +DATA ·CastConst+1776(SB)/8, $0x0000000173ea6628 +DATA ·CastConst+1784(SB)/8, $0x00000000af582806 + +// x^146496 mod p(x), x^146432 mod p(x) +DATA ·CastConst+1792(SB)/8, $0x00000001c8b4e00a +DATA ·CastConst+1800(SB)/8, $0x00000001006388f6 + +// x^145472 mod p(x), x^145408 mod p(x) +DATA ·CastConst+1808(SB)/8, $0x00000000de95d6aa +DATA ·CastConst+1816(SB)/8, $0x0000000179eca00a + +// x^144448 mod p(x), x^144384 mod p(x) +DATA ·CastConst+1824(SB)/8, $0x000000010b7f7248 +DATA ·CastConst+1832(SB)/8, $0x0000000122410a6a + +// x^143424 mod p(x), x^143360 mod p(x) +DATA ·CastConst+1840(SB)/8, $0x00000001326e3a06 +DATA ·CastConst+1848(SB)/8, $0x000000004288e87c + +// x^142400 mod p(x), x^142336 mod p(x) +DATA ·CastConst+1856(SB)/8, $0x00000000bb62c2e6 +DATA ·CastConst+1864(SB)/8, $0x000000016c5490da + +// x^141376 mod p(x), x^141312 mod p(x) +DATA ·CastConst+1872(SB)/8, $0x0000000156a4b2c2 +DATA ·CastConst+1880(SB)/8, $0x00000000d1c71f6e + +// x^140352 mod p(x), x^140288 mod p(x) +DATA ·CastConst+1888(SB)/8, $0x000000011dfe763a +DATA ·CastConst+1896(SB)/8, $0x00000001b4ce08a6 + +// x^139328 mod p(x), x^139264 mod p(x) +DATA ·CastConst+1904(SB)/8, $0x000000007bcca8e2 +DATA ·CastConst+1912(SB)/8, $0x00000001466ba60c + +// x^138304 mod p(x), x^138240 mod p(x) +DATA ·CastConst+1920(SB)/8, $0x0000000186118faa +DATA ·CastConst+1928(SB)/8, $0x00000001f6c488a4 + +// x^137280 mod p(x), x^137216 mod p(x) +DATA ·CastConst+1936(SB)/8, $0x0000000111a65a88 +DATA ·CastConst+1944(SB)/8, $0x000000013bfb0682 + +// x^136256 mod p(x), x^136192 mod p(x) +DATA ·CastConst+1952(SB)/8, $0x000000003565e1c4 +DATA ·CastConst+1960(SB)/8, $0x00000000690e9e54 + +// x^135232 mod p(x), x^135168 mod p(x) +DATA ·CastConst+1968(SB)/8, $0x000000012ed02a82 +DATA ·CastConst+1976(SB)/8, $0x00000000281346b6 + +// x^134208 mod p(x), x^134144 mod p(x) +DATA ·CastConst+1984(SB)/8, $0x00000000c486ecfc +DATA ·CastConst+1992(SB)/8, $0x0000000156464024 + +// x^133184 mod p(x), x^133120 mod p(x) +DATA ·CastConst+2000(SB)/8, $0x0000000001b951b2 +DATA ·CastConst+2008(SB)/8, $0x000000016063a8dc + +// x^132160 mod p(x), x^132096 mod p(x) +DATA ·CastConst+2016(SB)/8, $0x0000000048143916 +DATA ·CastConst+2024(SB)/8, $0x0000000116a66362 + +// x^131136 mod p(x), x^131072 mod p(x) +DATA ·CastConst+2032(SB)/8, $0x00000001dc2ae124 +DATA ·CastConst+2040(SB)/8, $0x000000017e8aa4d2 + +// x^130112 mod p(x), x^130048 mod p(x) +DATA ·CastConst+2048(SB)/8, $0x00000001416c58d6 +DATA ·CastConst+2056(SB)/8, $0x00000001728eb10c + +// x^129088 mod p(x), x^129024 mod p(x) +DATA ·CastConst+2064(SB)/8, $0x00000000a479744a +DATA ·CastConst+2072(SB)/8, $0x00000001b08fd7fa + +// x^128064 mod p(x), x^128000 mod p(x) +DATA ·CastConst+2080(SB)/8, $0x0000000096ca3a26 +DATA ·CastConst+2088(SB)/8, $0x00000001092a16e8 + +// x^127040 mod p(x), x^126976 mod p(x) +DATA ·CastConst+2096(SB)/8, $0x00000000ff223d4e +DATA ·CastConst+2104(SB)/8, $0x00000000a505637c + +// x^126016 mod p(x), x^125952 mod p(x) +DATA ·CastConst+2112(SB)/8, $0x000000010e84da42 +DATA ·CastConst+2120(SB)/8, $0x00000000d94869b2 + +// x^124992 mod p(x), x^124928 mod p(x) +DATA ·CastConst+2128(SB)/8, $0x00000001b61ba3d0 +DATA ·CastConst+2136(SB)/8, $0x00000001c8b203ae + +// x^123968 mod p(x), x^123904 mod p(x) +DATA ·CastConst+2144(SB)/8, $0x00000000680f2de8 +DATA ·CastConst+2152(SB)/8, $0x000000005704aea0 + +// x^122944 mod p(x), x^122880 mod p(x) +DATA ·CastConst+2160(SB)/8, $0x000000008772a9a8 +DATA ·CastConst+2168(SB)/8, $0x000000012e295fa2 + +// x^121920 mod p(x), x^121856 mod p(x) +DATA ·CastConst+2176(SB)/8, $0x0000000155f295bc +DATA ·CastConst+2184(SB)/8, $0x000000011d0908bc + +// x^120896 mod p(x), x^120832 mod p(x) +DATA ·CastConst+2192(SB)/8, $0x00000000595f9282 +DATA ·CastConst+2200(SB)/8, $0x0000000193ed97ea + +// x^119872 mod p(x), x^119808 mod p(x) +DATA ·CastConst+2208(SB)/8, $0x0000000164b1c25a +DATA ·CastConst+2216(SB)/8, $0x000000013a0f1c52 + +// x^118848 mod p(x), x^118784 mod p(x) +DATA ·CastConst+2224(SB)/8, $0x00000000fbd67c50 +DATA ·CastConst+2232(SB)/8, $0x000000010c2c40c0 + +// x^117824 mod p(x), x^117760 mod p(x) +DATA ·CastConst+2240(SB)/8, $0x0000000096076268 +DATA ·CastConst+2248(SB)/8, $0x00000000ff6fac3e + +// x^116800 mod p(x), x^116736 mod p(x) +DATA ·CastConst+2256(SB)/8, $0x00000001d288e4cc +DATA ·CastConst+2264(SB)/8, $0x000000017b3609c0 + +// x^115776 mod p(x), x^115712 mod p(x) +DATA ·CastConst+2272(SB)/8, $0x00000001eaac1bdc +DATA ·CastConst+2280(SB)/8, $0x0000000088c8c922 + +// x^114752 mod p(x), x^114688 mod p(x) +DATA ·CastConst+2288(SB)/8, $0x00000001f1ea39e2 +DATA ·CastConst+2296(SB)/8, $0x00000001751baae6 + +// x^113728 mod p(x), x^113664 mod p(x) +DATA ·CastConst+2304(SB)/8, $0x00000001eb6506fc +DATA ·CastConst+2312(SB)/8, $0x0000000107952972 + +// x^112704 mod p(x), x^112640 mod p(x) +DATA ·CastConst+2320(SB)/8, $0x000000010f806ffe +DATA ·CastConst+2328(SB)/8, $0x0000000162b00abe + +// x^111680 mod p(x), x^111616 mod p(x) +DATA ·CastConst+2336(SB)/8, $0x000000010408481e +DATA ·CastConst+2344(SB)/8, $0x000000000d7b404c + +// x^110656 mod p(x), x^110592 mod p(x) +DATA ·CastConst+2352(SB)/8, $0x0000000188260534 +DATA ·CastConst+2360(SB)/8, $0x00000000763b13d4 + +// x^109632 mod p(x), x^109568 mod p(x) +DATA ·CastConst+2368(SB)/8, $0x0000000058fc73e0 +DATA ·CastConst+2376(SB)/8, $0x00000000f6dc22d8 + +// x^108608 mod p(x), x^108544 mod p(x) +DATA ·CastConst+2384(SB)/8, $0x00000000391c59b8 +DATA ·CastConst+2392(SB)/8, $0x000000007daae060 + +// x^107584 mod p(x), x^107520 mod p(x) +DATA ·CastConst+2400(SB)/8, $0x000000018b638400 +DATA ·CastConst+2408(SB)/8, $0x000000013359ab7c + +// x^106560 mod p(x), x^106496 mod p(x) +DATA ·CastConst+2416(SB)/8, $0x000000011738f5c4 +DATA ·CastConst+2424(SB)/8, $0x000000008add438a + +// x^105536 mod p(x), x^105472 mod p(x) +DATA ·CastConst+2432(SB)/8, $0x000000008cf7c6da +DATA ·CastConst+2440(SB)/8, $0x00000001edbefdea + +// x^104512 mod p(x), x^104448 mod p(x) +DATA ·CastConst+2448(SB)/8, $0x00000001ef97fb16 +DATA ·CastConst+2456(SB)/8, $0x000000004104e0f8 + +// x^103488 mod p(x), x^103424 mod p(x) +DATA ·CastConst+2464(SB)/8, $0x0000000102130e20 +DATA ·CastConst+2472(SB)/8, $0x00000000b48a8222 + +// x^102464 mod p(x), x^102400 mod p(x) +DATA ·CastConst+2480(SB)/8, $0x00000000db968898 +DATA ·CastConst+2488(SB)/8, $0x00000001bcb46844 + +// x^101440 mod p(x), x^101376 mod p(x) +DATA ·CastConst+2496(SB)/8, $0x00000000b5047b5e +DATA ·CastConst+2504(SB)/8, $0x000000013293ce0a + +// x^100416 mod p(x), x^100352 mod p(x) +DATA ·CastConst+2512(SB)/8, $0x000000010b90fdb2 +DATA ·CastConst+2520(SB)/8, $0x00000001710d0844 + +// x^99392 mod p(x), x^99328 mod p(x) +DATA ·CastConst+2528(SB)/8, $0x000000004834a32e +DATA ·CastConst+2536(SB)/8, $0x0000000117907f6e + +// x^98368 mod p(x), x^98304 mod p(x) +DATA ·CastConst+2544(SB)/8, $0x0000000059c8f2b0 +DATA ·CastConst+2552(SB)/8, $0x0000000087ddf93e + +// x^97344 mod p(x), x^97280 mod p(x) +DATA ·CastConst+2560(SB)/8, $0x0000000122cec508 +DATA ·CastConst+2568(SB)/8, $0x000000005970e9b0 + +// x^96320 mod p(x), x^96256 mod p(x) +DATA ·CastConst+2576(SB)/8, $0x000000000a330cda +DATA ·CastConst+2584(SB)/8, $0x0000000185b2b7d0 + +// x^95296 mod p(x), x^95232 mod p(x) +DATA ·CastConst+2592(SB)/8, $0x000000014a47148c +DATA ·CastConst+2600(SB)/8, $0x00000001dcee0efc + +// x^94272 mod p(x), x^94208 mod p(x) +DATA ·CastConst+2608(SB)/8, $0x0000000042c61cb8 +DATA ·CastConst+2616(SB)/8, $0x0000000030da2722 + +// x^93248 mod p(x), x^93184 mod p(x) +DATA ·CastConst+2624(SB)/8, $0x0000000012fe6960 +DATA ·CastConst+2632(SB)/8, $0x000000012f925a18 + +// x^92224 mod p(x), x^92160 mod p(x) +DATA ·CastConst+2640(SB)/8, $0x00000000dbda2c20 +DATA ·CastConst+2648(SB)/8, $0x00000000dd2e357c + +// x^91200 mod p(x), x^91136 mod p(x) +DATA ·CastConst+2656(SB)/8, $0x000000011122410c +DATA ·CastConst+2664(SB)/8, $0x00000000071c80de + +// x^90176 mod p(x), x^90112 mod p(x) +DATA ·CastConst+2672(SB)/8, $0x00000000977b2070 +DATA ·CastConst+2680(SB)/8, $0x000000011513140a + +// x^89152 mod p(x), x^89088 mod p(x) +DATA ·CastConst+2688(SB)/8, $0x000000014050438e +DATA ·CastConst+2696(SB)/8, $0x00000001df876e8e + +// x^88128 mod p(x), x^88064 mod p(x) +DATA ·CastConst+2704(SB)/8, $0x0000000147c840e8 +DATA ·CastConst+2712(SB)/8, $0x000000015f81d6ce + +// x^87104 mod p(x), x^87040 mod p(x) +DATA ·CastConst+2720(SB)/8, $0x00000001cc7c88ce +DATA ·CastConst+2728(SB)/8, $0x000000019dd94dbe + +// x^86080 mod p(x), x^86016 mod p(x) +DATA ·CastConst+2736(SB)/8, $0x00000001476b35a4 +DATA ·CastConst+2744(SB)/8, $0x00000001373d206e + +// x^85056 mod p(x), x^84992 mod p(x) +DATA ·CastConst+2752(SB)/8, $0x000000013d52d508 +DATA ·CastConst+2760(SB)/8, $0x00000000668ccade + +// x^84032 mod p(x), x^83968 mod p(x) +DATA ·CastConst+2768(SB)/8, $0x000000008e4be32e +DATA ·CastConst+2776(SB)/8, $0x00000001b192d268 + +// x^83008 mod p(x), x^82944 mod p(x) +DATA ·CastConst+2784(SB)/8, $0x00000000024120fe +DATA ·CastConst+2792(SB)/8, $0x00000000e30f3a78 + +// x^81984 mod p(x), x^81920 mod p(x) +DATA ·CastConst+2800(SB)/8, $0x00000000ddecddb4 +DATA ·CastConst+2808(SB)/8, $0x000000010ef1f7bc + +// x^80960 mod p(x), x^80896 mod p(x) +DATA ·CastConst+2816(SB)/8, $0x00000000d4d403bc +DATA ·CastConst+2824(SB)/8, $0x00000001f5ac7380 + +// x^79936 mod p(x), x^79872 mod p(x) +DATA ·CastConst+2832(SB)/8, $0x00000001734b89aa +DATA ·CastConst+2840(SB)/8, $0x000000011822ea70 + +// x^78912 mod p(x), x^78848 mod p(x) +DATA ·CastConst+2848(SB)/8, $0x000000010e7a58d6 +DATA ·CastConst+2856(SB)/8, $0x00000000c3a33848 + +// x^77888 mod p(x), x^77824 mod p(x) +DATA ·CastConst+2864(SB)/8, $0x00000001f9f04e9c +DATA ·CastConst+2872(SB)/8, $0x00000001bd151c24 + +// x^76864 mod p(x), x^76800 mod p(x) +DATA ·CastConst+2880(SB)/8, $0x00000000b692225e +DATA ·CastConst+2888(SB)/8, $0x0000000056002d76 + +// x^75840 mod p(x), x^75776 mod p(x) +DATA ·CastConst+2896(SB)/8, $0x000000019b8d3f3e +DATA ·CastConst+2904(SB)/8, $0x000000014657c4f4 + +// x^74816 mod p(x), x^74752 mod p(x) +DATA ·CastConst+2912(SB)/8, $0x00000001a874f11e +DATA ·CastConst+2920(SB)/8, $0x0000000113742d7c + +// x^73792 mod p(x), x^73728 mod p(x) +DATA ·CastConst+2928(SB)/8, $0x000000010d5a4254 +DATA ·CastConst+2936(SB)/8, $0x000000019c5920ba + +// x^72768 mod p(x), x^72704 mod p(x) +DATA ·CastConst+2944(SB)/8, $0x00000000bbb2f5d6 +DATA ·CastConst+2952(SB)/8, $0x000000005216d2d6 + +// x^71744 mod p(x), x^71680 mod p(x) +DATA ·CastConst+2960(SB)/8, $0x0000000179cc0e36 +DATA ·CastConst+2968(SB)/8, $0x0000000136f5ad8a + +// x^70720 mod p(x), x^70656 mod p(x) +DATA ·CastConst+2976(SB)/8, $0x00000001dca1da4a +DATA ·CastConst+2984(SB)/8, $0x000000018b07beb6 + +// x^69696 mod p(x), x^69632 mod p(x) +DATA ·CastConst+2992(SB)/8, $0x00000000feb1a192 +DATA ·CastConst+3000(SB)/8, $0x00000000db1e93b0 + +// x^68672 mod p(x), x^68608 mod p(x) +DATA ·CastConst+3008(SB)/8, $0x00000000d1eeedd6 +DATA ·CastConst+3016(SB)/8, $0x000000000b96fa3a + +// x^67648 mod p(x), x^67584 mod p(x) +DATA ·CastConst+3024(SB)/8, $0x000000008fad9bb4 +DATA ·CastConst+3032(SB)/8, $0x00000001d9968af0 + +// x^66624 mod p(x), x^66560 mod p(x) +DATA ·CastConst+3040(SB)/8, $0x00000001884938e4 +DATA ·CastConst+3048(SB)/8, $0x000000000e4a77a2 + +// x^65600 mod p(x), x^65536 mod p(x) +DATA ·CastConst+3056(SB)/8, $0x00000001bc2e9bc0 +DATA ·CastConst+3064(SB)/8, $0x00000000508c2ac8 + +// x^64576 mod p(x), x^64512 mod p(x) +DATA ·CastConst+3072(SB)/8, $0x00000001f9658a68 +DATA ·CastConst+3080(SB)/8, $0x0000000021572a80 + +// x^63552 mod p(x), x^63488 mod p(x) +DATA ·CastConst+3088(SB)/8, $0x000000001b9224fc +DATA ·CastConst+3096(SB)/8, $0x00000001b859daf2 + +// x^62528 mod p(x), x^62464 mod p(x) +DATA ·CastConst+3104(SB)/8, $0x0000000055b2fb84 +DATA ·CastConst+3112(SB)/8, $0x000000016f788474 + +// x^61504 mod p(x), x^61440 mod p(x) +DATA ·CastConst+3120(SB)/8, $0x000000018b090348 +DATA ·CastConst+3128(SB)/8, $0x00000001b438810e + +// x^60480 mod p(x), x^60416 mod p(x) +DATA ·CastConst+3136(SB)/8, $0x000000011ccbd5ea +DATA ·CastConst+3144(SB)/8, $0x0000000095ddc6f2 + +// x^59456 mod p(x), x^59392 mod p(x) +DATA ·CastConst+3152(SB)/8, $0x0000000007ae47f8 +DATA ·CastConst+3160(SB)/8, $0x00000001d977c20c + +// x^58432 mod p(x), x^58368 mod p(x) +DATA ·CastConst+3168(SB)/8, $0x0000000172acbec0 +DATA ·CastConst+3176(SB)/8, $0x00000000ebedb99a + +// x^57408 mod p(x), x^57344 mod p(x) +DATA ·CastConst+3184(SB)/8, $0x00000001c6e3ff20 +DATA ·CastConst+3192(SB)/8, $0x00000001df9e9e92 + +// x^56384 mod p(x), x^56320 mod p(x) +DATA ·CastConst+3200(SB)/8, $0x00000000e1b38744 +DATA ·CastConst+3208(SB)/8, $0x00000001a4a3f952 + +// x^55360 mod p(x), x^55296 mod p(x) +DATA ·CastConst+3216(SB)/8, $0x00000000791585b2 +DATA ·CastConst+3224(SB)/8, $0x00000000e2f51220 + +// x^54336 mod p(x), x^54272 mod p(x) +DATA ·CastConst+3232(SB)/8, $0x00000000ac53b894 +DATA ·CastConst+3240(SB)/8, $0x000000004aa01f3e + +// x^53312 mod p(x), x^53248 mod p(x) +DATA ·CastConst+3248(SB)/8, $0x00000001ed5f2cf4 +DATA ·CastConst+3256(SB)/8, $0x00000000b3e90a58 + +// x^52288 mod p(x), x^52224 mod p(x) +DATA ·CastConst+3264(SB)/8, $0x00000001df48b2e0 +DATA ·CastConst+3272(SB)/8, $0x000000000c9ca2aa + +// x^51264 mod p(x), x^51200 mod p(x) +DATA ·CastConst+3280(SB)/8, $0x00000000049c1c62 +DATA ·CastConst+3288(SB)/8, $0x0000000151682316 + +// x^50240 mod p(x), x^50176 mod p(x) +DATA ·CastConst+3296(SB)/8, $0x000000017c460c12 +DATA ·CastConst+3304(SB)/8, $0x0000000036fce78c + +// x^49216 mod p(x), x^49152 mod p(x) +DATA ·CastConst+3312(SB)/8, $0x000000015be4da7e +DATA ·CastConst+3320(SB)/8, $0x000000009037dc10 + +// x^48192 mod p(x), x^48128 mod p(x) +DATA ·CastConst+3328(SB)/8, $0x000000010f38f668 +DATA ·CastConst+3336(SB)/8, $0x00000000d3298582 + +// x^47168 mod p(x), x^47104 mod p(x) +DATA ·CastConst+3344(SB)/8, $0x0000000039f40a00 +DATA ·CastConst+3352(SB)/8, $0x00000001b42e8ad6 + +// x^46144 mod p(x), x^46080 mod p(x) +DATA ·CastConst+3360(SB)/8, $0x00000000bd4c10c4 +DATA ·CastConst+3368(SB)/8, $0x00000000142a9838 + +// x^45120 mod p(x), x^45056 mod p(x) +DATA ·CastConst+3376(SB)/8, $0x0000000042db1d98 +DATA ·CastConst+3384(SB)/8, $0x0000000109c7f190 + +// x^44096 mod p(x), x^44032 mod p(x) +DATA ·CastConst+3392(SB)/8, $0x00000001c905bae6 +DATA ·CastConst+3400(SB)/8, $0x0000000056ff9310 + +// x^43072 mod p(x), x^43008 mod p(x) +DATA ·CastConst+3408(SB)/8, $0x00000000069d40ea +DATA ·CastConst+3416(SB)/8, $0x00000001594513aa + +// x^42048 mod p(x), x^41984 mod p(x) +DATA ·CastConst+3424(SB)/8, $0x000000008e4fbad0 +DATA ·CastConst+3432(SB)/8, $0x00000001e3b5b1e8 + +// x^41024 mod p(x), x^40960 mod p(x) +DATA ·CastConst+3440(SB)/8, $0x0000000047bedd46 +DATA ·CastConst+3448(SB)/8, $0x000000011dd5fc08 + +// x^40000 mod p(x), x^39936 mod p(x) +DATA ·CastConst+3456(SB)/8, $0x0000000026396bf8 +DATA ·CastConst+3464(SB)/8, $0x00000001675f0cc2 + +// x^38976 mod p(x), x^38912 mod p(x) +DATA ·CastConst+3472(SB)/8, $0x00000000379beb92 +DATA ·CastConst+3480(SB)/8, $0x00000000d1c8dd44 + +// x^37952 mod p(x), x^37888 mod p(x) +DATA ·CastConst+3488(SB)/8, $0x000000000abae54a +DATA ·CastConst+3496(SB)/8, $0x0000000115ebd3d8 + +// x^36928 mod p(x), x^36864 mod p(x) +DATA ·CastConst+3504(SB)/8, $0x0000000007e6a128 +DATA ·CastConst+3512(SB)/8, $0x00000001ecbd0dac + +// x^35904 mod p(x), x^35840 mod p(x) +DATA ·CastConst+3520(SB)/8, $0x000000000ade29d2 +DATA ·CastConst+3528(SB)/8, $0x00000000cdf67af2 + +// x^34880 mod p(x), x^34816 mod p(x) +DATA ·CastConst+3536(SB)/8, $0x00000000f974c45c +DATA ·CastConst+3544(SB)/8, $0x000000004c01ff4c + +// x^33856 mod p(x), x^33792 mod p(x) +DATA ·CastConst+3552(SB)/8, $0x00000000e77ac60a +DATA ·CastConst+3560(SB)/8, $0x00000000f2d8657e + +// x^32832 mod p(x), x^32768 mod p(x) +DATA ·CastConst+3568(SB)/8, $0x0000000145895816 +DATA ·CastConst+3576(SB)/8, $0x000000006bae74c4 + +// x^31808 mod p(x), x^31744 mod p(x) +DATA ·CastConst+3584(SB)/8, $0x0000000038e362be +DATA ·CastConst+3592(SB)/8, $0x0000000152af8aa0 + +// x^30784 mod p(x), x^30720 mod p(x) +DATA ·CastConst+3600(SB)/8, $0x000000007f991a64 +DATA ·CastConst+3608(SB)/8, $0x0000000004663802 + +// x^29760 mod p(x), x^29696 mod p(x) +DATA ·CastConst+3616(SB)/8, $0x00000000fa366d3a +DATA ·CastConst+3624(SB)/8, $0x00000001ab2f5afc + +// x^28736 mod p(x), x^28672 mod p(x) +DATA ·CastConst+3632(SB)/8, $0x00000001a2bb34f0 +DATA ·CastConst+3640(SB)/8, $0x0000000074a4ebd4 + +// x^27712 mod p(x), x^27648 mod p(x) +DATA ·CastConst+3648(SB)/8, $0x0000000028a9981e +DATA ·CastConst+3656(SB)/8, $0x00000001d7ab3a4c + +// x^26688 mod p(x), x^26624 mod p(x) +DATA ·CastConst+3664(SB)/8, $0x00000001dbc672be +DATA ·CastConst+3672(SB)/8, $0x00000001a8da60c6 + +// x^25664 mod p(x), x^25600 mod p(x) +DATA ·CastConst+3680(SB)/8, $0x00000000b04d77f6 +DATA ·CastConst+3688(SB)/8, $0x000000013cf63820 + +// x^24640 mod p(x), x^24576 mod p(x) +DATA ·CastConst+3696(SB)/8, $0x0000000124400d96 +DATA ·CastConst+3704(SB)/8, $0x00000000bec12e1e + +// x^23616 mod p(x), x^23552 mod p(x) +DATA ·CastConst+3712(SB)/8, $0x000000014ca4b414 +DATA ·CastConst+3720(SB)/8, $0x00000001c6368010 + +// x^22592 mod p(x), x^22528 mod p(x) +DATA ·CastConst+3728(SB)/8, $0x000000012fe2c938 +DATA ·CastConst+3736(SB)/8, $0x00000001e6e78758 + +// x^21568 mod p(x), x^21504 mod p(x) +DATA ·CastConst+3744(SB)/8, $0x00000001faed01e6 +DATA ·CastConst+3752(SB)/8, $0x000000008d7f2b3c + +// x^20544 mod p(x), x^20480 mod p(x) +DATA ·CastConst+3760(SB)/8, $0x000000007e80ecfe +DATA ·CastConst+3768(SB)/8, $0x000000016b4a156e + +// x^19520 mod p(x), x^19456 mod p(x) +DATA ·CastConst+3776(SB)/8, $0x0000000098daee94 +DATA ·CastConst+3784(SB)/8, $0x00000001c63cfeb6 + +// x^18496 mod p(x), x^18432 mod p(x) +DATA ·CastConst+3792(SB)/8, $0x000000010a04edea +DATA ·CastConst+3800(SB)/8, $0x000000015f902670 + +// x^17472 mod p(x), x^17408 mod p(x) +DATA ·CastConst+3808(SB)/8, $0x00000001c00b4524 +DATA ·CastConst+3816(SB)/8, $0x00000001cd5de11e + +// x^16448 mod p(x), x^16384 mod p(x) +DATA ·CastConst+3824(SB)/8, $0x0000000170296550 +DATA ·CastConst+3832(SB)/8, $0x000000001acaec54 + +// x^15424 mod p(x), x^15360 mod p(x) +DATA ·CastConst+3840(SB)/8, $0x0000000181afaa48 +DATA ·CastConst+3848(SB)/8, $0x000000002bd0ca78 + +// x^14400 mod p(x), x^14336 mod p(x) +DATA ·CastConst+3856(SB)/8, $0x0000000185a31ffa +DATA ·CastConst+3864(SB)/8, $0x0000000032d63d5c + +// x^13376 mod p(x), x^13312 mod p(x) +DATA ·CastConst+3872(SB)/8, $0x000000002469f608 +DATA ·CastConst+3880(SB)/8, $0x000000001c6d4e4c + +// x^12352 mod p(x), x^12288 mod p(x) +DATA ·CastConst+3888(SB)/8, $0x000000006980102a +DATA ·CastConst+3896(SB)/8, $0x0000000106a60b92 + +// x^11328 mod p(x), x^11264 mod p(x) +DATA ·CastConst+3904(SB)/8, $0x0000000111ea9ca8 +DATA ·CastConst+3912(SB)/8, $0x00000000d3855e12 + +// x^10304 mod p(x), x^10240 mod p(x) +DATA ·CastConst+3920(SB)/8, $0x00000001bd1d29ce +DATA ·CastConst+3928(SB)/8, $0x00000000e3125636 + +// x^9280 mod p(x), x^9216 mod p(x) +DATA ·CastConst+3936(SB)/8, $0x00000001b34b9580 +DATA ·CastConst+3944(SB)/8, $0x000000009e8f7ea4 + +// x^8256 mod p(x), x^8192 mod p(x) +DATA ·CastConst+3952(SB)/8, $0x000000003076054e +DATA ·CastConst+3960(SB)/8, $0x00000001c82e562c + +// x^7232 mod p(x), x^7168 mod p(x) +DATA ·CastConst+3968(SB)/8, $0x000000012a608ea4 +DATA ·CastConst+3976(SB)/8, $0x00000000ca9f09ce + +// x^6208 mod p(x), x^6144 mod p(x) +DATA ·CastConst+3984(SB)/8, $0x00000000784d05fe +DATA ·CastConst+3992(SB)/8, $0x00000000c63764e6 + +// x^5184 mod p(x), x^5120 mod p(x) +DATA ·CastConst+4000(SB)/8, $0x000000016ef0d82a +DATA ·CastConst+4008(SB)/8, $0x0000000168d2e49e + +// x^4160 mod p(x), x^4096 mod p(x) +DATA ·CastConst+4016(SB)/8, $0x0000000075bda454 +DATA ·CastConst+4024(SB)/8, $0x00000000e986c148 + +// x^3136 mod p(x), x^3072 mod p(x) +DATA ·CastConst+4032(SB)/8, $0x000000003dc0a1c4 +DATA ·CastConst+4040(SB)/8, $0x00000000cfb65894 + +// x^2112 mod p(x), x^2048 mod p(x) +DATA ·CastConst+4048(SB)/8, $0x00000000e9a5d8be +DATA ·CastConst+4056(SB)/8, $0x0000000111cadee4 + +// x^1088 mod p(x), x^1024 mod p(x) +DATA ·CastConst+4064(SB)/8, $0x00000001609bc4b4 +DATA ·CastConst+4072(SB)/8, $0x0000000171fb63ce + +// x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) +DATA ·CastConst+4080(SB)/8, $0x5cf015c388e56f72 +DATA ·CastConst+4088(SB)/8, $0x7fec2963e5bf8048 + +// x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) +DATA ·CastConst+4096(SB)/8, $0x963a18920246e2e6 +DATA ·CastConst+4104(SB)/8, $0x38e888d4844752a9 + +// x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) +DATA ·CastConst+4112(SB)/8, $0x419a441956993a31 +DATA ·CastConst+4120(SB)/8, $0x42316c00730206ad + +// x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) +DATA ·CastConst+4128(SB)/8, $0x924752ba2b830011 +DATA ·CastConst+4136(SB)/8, $0x543d5c543e65ddf9 + +// x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) +DATA ·CastConst+4144(SB)/8, $0x55bd7f9518e4a304 +DATA ·CastConst+4152(SB)/8, $0x78e87aaf56767c92 + +// x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) +DATA ·CastConst+4160(SB)/8, $0x6d76739fe0553f1e +DATA ·CastConst+4168(SB)/8, $0x8f68fcec1903da7f + +// x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) +DATA ·CastConst+4176(SB)/8, $0xc133722b1fe0b5c3 +DATA ·CastConst+4184(SB)/8, $0x3f4840246791d588 + +// x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) +DATA ·CastConst+4192(SB)/8, $0x64b67ee0e55ef1f3 +DATA ·CastConst+4200(SB)/8, $0x34c96751b04de25a + +// x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) +DATA ·CastConst+4208(SB)/8, $0x069db049b8fdb1e7 +DATA ·CastConst+4216(SB)/8, $0x156c8e180b4a395b + +// x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) +DATA ·CastConst+4224(SB)/8, $0xa11bfaf3c9e90b9e +DATA ·CastConst+4232(SB)/8, $0xe0b99ccbe661f7be + +// x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) +DATA ·CastConst+4240(SB)/8, $0x817cdc5119b29a35 +DATA ·CastConst+4248(SB)/8, $0x041d37768cd75659 + +// x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) +DATA ·CastConst+4256(SB)/8, $0x1ce9d94b36c41f1c +DATA ·CastConst+4264(SB)/8, $0x3a0777818cfaa965 + +// x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) +DATA ·CastConst+4272(SB)/8, $0x4f256efcb82be955 +DATA ·CastConst+4280(SB)/8, $0x0e148e8252377a55 + +// x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) +DATA ·CastConst+4288(SB)/8, $0xec1631edb2dea967 +DATA ·CastConst+4296(SB)/8, $0x9c25531d19e65dde + +// x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) +DATA ·CastConst+4304(SB)/8, $0x5d27e147510ac59a +DATA ·CastConst+4312(SB)/8, $0x790606ff9957c0a6 + +// x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) +DATA ·CastConst+4320(SB)/8, $0xa66805eb18b8ea18 +DATA ·CastConst+4328(SB)/8, $0x82f63b786ea2d55c + +GLOBL ·CastConst(SB), RODATA, $4336 + +// Barrett constant m - (4^32)/n +DATA ·CastBarConst(SB)/8, $0x00000000dea713f1 +DATA ·CastBarConst+8(SB)/8, $0x0000000000000000 +DATA ·CastBarConst+16(SB)/8, $0x0000000105ec76f1 +DATA ·CastBarConst+24(SB)/8, $0x0000000000000000 +GLOBL ·CastBarConst(SB), RODATA, $32 + +// Reduce 262144 kbits to 1024 bits +// x^261184 mod p(x), x^261120 mod p(x) +DATA ·KoopConst+0(SB)/8, $0x00000000d72535b2 +DATA ·KoopConst+8(SB)/8, $0x000000007fd74916 + +// x^260160 mod p(x), x^260096 mod p(x) +DATA ·KoopConst+16(SB)/8, $0x0000000118a2a1b4 +DATA ·KoopConst+24(SB)/8, $0x000000010e944b56 + +// x^259136 mod p(x), x^259072 mod p(x) +DATA ·KoopConst+32(SB)/8, $0x0000000147b5c49c +DATA ·KoopConst+40(SB)/8, $0x00000000bfe71c20 + +// x^258112 mod p(x), x^258048 mod p(x) +DATA ·KoopConst+48(SB)/8, $0x00000001ca76a040 +DATA ·KoopConst+56(SB)/8, $0x0000000021324d9a + +// x^257088 mod p(x), x^257024 mod p(x) +DATA ·KoopConst+64(SB)/8, $0x00000001e3152efc +DATA ·KoopConst+72(SB)/8, $0x00000000d20972ce + +// x^256064 mod p(x), x^256000 mod p(x) +DATA ·KoopConst+80(SB)/8, $0x00000001b0349792 +DATA ·KoopConst+88(SB)/8, $0x000000003475ea06 + +// x^255040 mod p(x), x^254976 mod p(x) +DATA ·KoopConst+96(SB)/8, $0x0000000120a60fe0 +DATA ·KoopConst+104(SB)/8, $0x00000001e40e36c4 + +// x^254016 mod p(x), x^253952 mod p(x) +DATA ·KoopConst+112(SB)/8, $0x00000000b3c4b082 +DATA ·KoopConst+120(SB)/8, $0x00000000b2490102 + +// x^252992 mod p(x), x^252928 mod p(x) +DATA ·KoopConst+128(SB)/8, $0x000000017fe9f3d2 +DATA ·KoopConst+136(SB)/8, $0x000000016b9e1332 + +// x^251968 mod p(x), x^251904 mod p(x) +DATA ·KoopConst+144(SB)/8, $0x0000000145703cbe +DATA ·KoopConst+152(SB)/8, $0x00000001d6c378f4 + +// x^250944 mod p(x), x^250880 mod p(x) +DATA ·KoopConst+160(SB)/8, $0x0000000107551c9c +DATA ·KoopConst+168(SB)/8, $0x0000000085796eac + +// x^249920 mod p(x), x^249856 mod p(x) +DATA ·KoopConst+176(SB)/8, $0x000000003865a702 +DATA ·KoopConst+184(SB)/8, $0x000000019d2f3aaa + +// x^248896 mod p(x), x^248832 mod p(x) +DATA ·KoopConst+192(SB)/8, $0x000000005504f9b8 +DATA ·KoopConst+200(SB)/8, $0x00000001554ddbd4 + +// x^247872 mod p(x), x^247808 mod p(x) +DATA ·KoopConst+208(SB)/8, $0x00000000239bcdd4 +DATA ·KoopConst+216(SB)/8, $0x00000000a76376b0 + +// x^246848 mod p(x), x^246784 mod p(x) +DATA ·KoopConst+224(SB)/8, $0x00000000caead774 +DATA ·KoopConst+232(SB)/8, $0x0000000139b7283c + +// x^245824 mod p(x), x^245760 mod p(x) +DATA ·KoopConst+240(SB)/8, $0x0000000022a3fa16 +DATA ·KoopConst+248(SB)/8, $0x0000000111087030 + +// x^244800 mod p(x), x^244736 mod p(x) +DATA ·KoopConst+256(SB)/8, $0x000000011f89160e +DATA ·KoopConst+264(SB)/8, $0x00000000ad786dc2 + +// x^243776 mod p(x), x^243712 mod p(x) +DATA ·KoopConst+272(SB)/8, $0x00000001a976c248 +DATA ·KoopConst+280(SB)/8, $0x00000000b7a1d068 + +// x^242752 mod p(x), x^242688 mod p(x) +DATA ·KoopConst+288(SB)/8, $0x00000000c20d09c8 +DATA ·KoopConst+296(SB)/8, $0x000000009c5c591c + +// x^241728 mod p(x), x^241664 mod p(x) +DATA ·KoopConst+304(SB)/8, $0x000000016264fe38 +DATA ·KoopConst+312(SB)/8, $0x000000016482aa1a + +// x^240704 mod p(x), x^240640 mod p(x) +DATA ·KoopConst+320(SB)/8, $0x00000001b57aee6a +DATA ·KoopConst+328(SB)/8, $0x000000009a409ba8 + +// x^239680 mod p(x), x^239616 mod p(x) +DATA ·KoopConst+336(SB)/8, $0x00000000e8f1be0a +DATA ·KoopConst+344(SB)/8, $0x00000001ad8eaed8 + +// x^238656 mod p(x), x^238592 mod p(x) +DATA ·KoopConst+352(SB)/8, $0x0000000053fcd0fc +DATA ·KoopConst+360(SB)/8, $0x000000017558b57a + +// x^237632 mod p(x), x^237568 mod p(x) +DATA ·KoopConst+368(SB)/8, $0x000000012df9d496 +DATA ·KoopConst+376(SB)/8, $0x00000000cbb749c8 + +// x^236608 mod p(x), x^236544 mod p(x) +DATA ·KoopConst+384(SB)/8, $0x000000004cb0db26 +DATA ·KoopConst+392(SB)/8, $0x000000008524fc5a + +// x^235584 mod p(x), x^235520 mod p(x) +DATA ·KoopConst+400(SB)/8, $0x00000001150c4584 +DATA ·KoopConst+408(SB)/8, $0x0000000028ce6b76 + +// x^234560 mod p(x), x^234496 mod p(x) +DATA ·KoopConst+416(SB)/8, $0x0000000104f52056 +DATA ·KoopConst+424(SB)/8, $0x00000000e0c48bdc + +// x^233536 mod p(x), x^233472 mod p(x) +DATA ·KoopConst+432(SB)/8, $0x000000008ea11ac8 +DATA ·KoopConst+440(SB)/8, $0x000000003dd3bf9a + +// x^232512 mod p(x), x^232448 mod p(x) +DATA ·KoopConst+448(SB)/8, $0x00000001cc0a3942 +DATA ·KoopConst+456(SB)/8, $0x00000000cb71066c + +// x^231488 mod p(x), x^231424 mod p(x) +DATA ·KoopConst+464(SB)/8, $0x00000000d26231e6 +DATA ·KoopConst+472(SB)/8, $0x00000001d4ee1540 + +// x^230464 mod p(x), x^230400 mod p(x) +DATA ·KoopConst+480(SB)/8, $0x00000000c70d5730 +DATA ·KoopConst+488(SB)/8, $0x00000001d82bed0a + +// x^229440 mod p(x), x^229376 mod p(x) +DATA ·KoopConst+496(SB)/8, $0x00000000e215dfc4 +DATA ·KoopConst+504(SB)/8, $0x000000016e0c7d86 + +// x^228416 mod p(x), x^228352 mod p(x) +DATA ·KoopConst+512(SB)/8, $0x000000013870d0dc +DATA ·KoopConst+520(SB)/8, $0x00000001437051b0 + +// x^227392 mod p(x), x^227328 mod p(x) +DATA ·KoopConst+528(SB)/8, $0x0000000153e4cf3c +DATA ·KoopConst+536(SB)/8, $0x00000000f9a8d4be + +// x^226368 mod p(x), x^226304 mod p(x) +DATA ·KoopConst+544(SB)/8, $0x0000000125f6fdf0 +DATA ·KoopConst+552(SB)/8, $0x000000016b09be1c + +// x^225344 mod p(x), x^225280 mod p(x) +DATA ·KoopConst+560(SB)/8, $0x0000000157ba3a82 +DATA ·KoopConst+568(SB)/8, $0x0000000105f50ed6 + +// x^224320 mod p(x), x^224256 mod p(x) +DATA ·KoopConst+576(SB)/8, $0x00000001cf711064 +DATA ·KoopConst+584(SB)/8, $0x00000001ca7fe3cc + +// x^223296 mod p(x), x^223232 mod p(x) +DATA ·KoopConst+592(SB)/8, $0x00000001006353d2 +DATA ·KoopConst+600(SB)/8, $0x0000000192372e78 + +// x^222272 mod p(x), x^222208 mod p(x) +DATA ·KoopConst+608(SB)/8, $0x000000010cd9faec +DATA ·KoopConst+616(SB)/8, $0x000000008a47af7e + +// x^221248 mod p(x), x^221184 mod p(x) +DATA ·KoopConst+624(SB)/8, $0x000000012148b190 +DATA ·KoopConst+632(SB)/8, $0x00000000a67473e8 + +// x^220224 mod p(x), x^220160 mod p(x) +DATA ·KoopConst+640(SB)/8, $0x00000000776473d6 +DATA ·KoopConst+648(SB)/8, $0x000000013689f2fa + +// x^219200 mod p(x), x^219136 mod p(x) +DATA ·KoopConst+656(SB)/8, $0x00000001ce765bd6 +DATA ·KoopConst+664(SB)/8, $0x00000000e7231774 + +// x^218176 mod p(x), x^218112 mod p(x) +DATA ·KoopConst+672(SB)/8, $0x00000000b29165e8 +DATA ·KoopConst+680(SB)/8, $0x0000000011b5ae68 + +// x^217152 mod p(x), x^217088 mod p(x) +DATA ·KoopConst+688(SB)/8, $0x0000000084ff5a68 +DATA ·KoopConst+696(SB)/8, $0x000000004fd5c188 + +// x^216128 mod p(x), x^216064 mod p(x) +DATA ·KoopConst+704(SB)/8, $0x00000001921e9076 +DATA ·KoopConst+712(SB)/8, $0x000000012148fa22 + +// x^215104 mod p(x), x^215040 mod p(x) +DATA ·KoopConst+720(SB)/8, $0x000000009a753a3c +DATA ·KoopConst+728(SB)/8, $0x000000010cff4f3e + +// x^214080 mod p(x), x^214016 mod p(x) +DATA ·KoopConst+736(SB)/8, $0x000000000251401e +DATA ·KoopConst+744(SB)/8, $0x00000001f9d991d4 + +// x^213056 mod p(x), x^212992 mod p(x) +DATA ·KoopConst+752(SB)/8, $0x00000001f65541fa +DATA ·KoopConst+760(SB)/8, $0x00000001c31db214 + +// x^212032 mod p(x), x^211968 mod p(x) +DATA ·KoopConst+768(SB)/8, $0x00000001d8c8117a +DATA ·KoopConst+776(SB)/8, $0x00000001849fba4a + +// x^211008 mod p(x), x^210944 mod p(x) +DATA ·KoopConst+784(SB)/8, $0x000000014f7a2200 +DATA ·KoopConst+792(SB)/8, $0x00000001cb603184 + +// x^209984 mod p(x), x^209920 mod p(x) +DATA ·KoopConst+800(SB)/8, $0x000000005154a9f4 +DATA ·KoopConst+808(SB)/8, $0x0000000132db7116 + +// x^208960 mod p(x), x^208896 mod p(x) +DATA ·KoopConst+816(SB)/8, $0x00000001dfc69196 +DATA ·KoopConst+824(SB)/8, $0x0000000010694e22 + +// x^207936 mod p(x), x^207872 mod p(x) +DATA ·KoopConst+832(SB)/8, $0x00000001c29f1aa0 +DATA ·KoopConst+840(SB)/8, $0x0000000103b7b478 + +// x^206912 mod p(x), x^206848 mod p(x) +DATA ·KoopConst+848(SB)/8, $0x000000013785f232 +DATA ·KoopConst+856(SB)/8, $0x000000000ab44030 + +// x^205888 mod p(x), x^205824 mod p(x) +DATA ·KoopConst+864(SB)/8, $0x000000010133536e +DATA ·KoopConst+872(SB)/8, $0x0000000131385b68 + +// x^204864 mod p(x), x^204800 mod p(x) +DATA ·KoopConst+880(SB)/8, $0x00000001d45421dc +DATA ·KoopConst+888(SB)/8, $0x00000001761dab66 + +// x^203840 mod p(x), x^203776 mod p(x) +DATA ·KoopConst+896(SB)/8, $0x000000000b59cc28 +DATA ·KoopConst+904(SB)/8, $0x000000012cf0a2a6 + +// x^202816 mod p(x), x^202752 mod p(x) +DATA ·KoopConst+912(SB)/8, $0x00000001f2f74aba +DATA ·KoopConst+920(SB)/8, $0x00000001f4ce25a2 + +// x^201792 mod p(x), x^201728 mod p(x) +DATA ·KoopConst+928(SB)/8, $0x00000000fb308e7e +DATA ·KoopConst+936(SB)/8, $0x000000014c2aae20 + +// x^200768 mod p(x), x^200704 mod p(x) +DATA ·KoopConst+944(SB)/8, $0x0000000167583fa6 +DATA ·KoopConst+952(SB)/8, $0x00000001c162a55a + +// x^199744 mod p(x), x^199680 mod p(x) +DATA ·KoopConst+960(SB)/8, $0x000000017ebb13e0 +DATA ·KoopConst+968(SB)/8, $0x0000000185681a40 + +// x^198720 mod p(x), x^198656 mod p(x) +DATA ·KoopConst+976(SB)/8, $0x00000001ca653306 +DATA ·KoopConst+984(SB)/8, $0x00000001f2642b48 + +// x^197696 mod p(x), x^197632 mod p(x) +DATA ·KoopConst+992(SB)/8, $0x0000000093bb6946 +DATA ·KoopConst+1000(SB)/8, $0x00000001d9cb5a78 + +// x^196672 mod p(x), x^196608 mod p(x) +DATA ·KoopConst+1008(SB)/8, $0x00000000cbc1553e +DATA ·KoopConst+1016(SB)/8, $0x000000008059328c + +// x^195648 mod p(x), x^195584 mod p(x) +DATA ·KoopConst+1024(SB)/8, $0x00000001f9a86fec +DATA ·KoopConst+1032(SB)/8, $0x000000009373c360 + +// x^194624 mod p(x), x^194560 mod p(x) +DATA ·KoopConst+1040(SB)/8, $0x0000000005c52d8a +DATA ·KoopConst+1048(SB)/8, $0x00000001a14061d6 + +// x^193600 mod p(x), x^193536 mod p(x) +DATA ·KoopConst+1056(SB)/8, $0x000000010d8dc668 +DATA ·KoopConst+1064(SB)/8, $0x00000000a9864d48 + +// x^192576 mod p(x), x^192512 mod p(x) +DATA ·KoopConst+1072(SB)/8, $0x0000000158571310 +DATA ·KoopConst+1080(SB)/8, $0x000000011df8c040 + +// x^191552 mod p(x), x^191488 mod p(x) +DATA ·KoopConst+1088(SB)/8, $0x0000000166102348 +DATA ·KoopConst+1096(SB)/8, $0x0000000023a3e6b6 + +// x^190528 mod p(x), x^190464 mod p(x) +DATA ·KoopConst+1104(SB)/8, $0x0000000009513050 +DATA ·KoopConst+1112(SB)/8, $0x00000001207db28a + +// x^189504 mod p(x), x^189440 mod p(x) +DATA ·KoopConst+1120(SB)/8, $0x00000000b0725c74 +DATA ·KoopConst+1128(SB)/8, $0x00000000f94bc632 + +// x^188480 mod p(x), x^188416 mod p(x) +DATA ·KoopConst+1136(SB)/8, $0x000000002985c7e2 +DATA ·KoopConst+1144(SB)/8, $0x00000000ea32cbf6 + +// x^187456 mod p(x), x^187392 mod p(x) +DATA ·KoopConst+1152(SB)/8, $0x00000000a7d4da9e +DATA ·KoopConst+1160(SB)/8, $0x0000000004eb981a + +// x^186432 mod p(x), x^186368 mod p(x) +DATA ·KoopConst+1168(SB)/8, $0x000000000a3f8792 +DATA ·KoopConst+1176(SB)/8, $0x00000000ca8ce712 + +// x^185408 mod p(x), x^185344 mod p(x) +DATA ·KoopConst+1184(SB)/8, $0x00000001ca2c1ce4 +DATA ·KoopConst+1192(SB)/8, $0x0000000065ba801c + +// x^184384 mod p(x), x^184320 mod p(x) +DATA ·KoopConst+1200(SB)/8, $0x00000000e2900196 +DATA ·KoopConst+1208(SB)/8, $0x0000000194aade7a + +// x^183360 mod p(x), x^183296 mod p(x) +DATA ·KoopConst+1216(SB)/8, $0x00000001fbadf0e4 +DATA ·KoopConst+1224(SB)/8, $0x00000001e7939fb2 + +// x^182336 mod p(x), x^182272 mod p(x) +DATA ·KoopConst+1232(SB)/8, $0x00000000d5d96c40 +DATA ·KoopConst+1240(SB)/8, $0x0000000098e5fe22 + +// x^181312 mod p(x), x^181248 mod p(x) +DATA ·KoopConst+1248(SB)/8, $0x000000015c11d3f2 +DATA ·KoopConst+1256(SB)/8, $0x000000016bba0324 + +// x^180288 mod p(x), x^180224 mod p(x) +DATA ·KoopConst+1264(SB)/8, $0x0000000111fb2648 +DATA ·KoopConst+1272(SB)/8, $0x0000000104dce052 + +// x^179264 mod p(x), x^179200 mod p(x) +DATA ·KoopConst+1280(SB)/8, $0x00000001d9f3a564 +DATA ·KoopConst+1288(SB)/8, $0x00000001af31a42e + +// x^178240 mod p(x), x^178176 mod p(x) +DATA ·KoopConst+1296(SB)/8, $0x00000001b556cd1e +DATA ·KoopConst+1304(SB)/8, $0x00000001c56c57ba + +// x^177216 mod p(x), x^177152 mod p(x) +DATA ·KoopConst+1312(SB)/8, $0x0000000101994d2c +DATA ·KoopConst+1320(SB)/8, $0x00000000f6bb1a2e + +// x^176192 mod p(x), x^176128 mod p(x) +DATA ·KoopConst+1328(SB)/8, $0x00000001e8dbf09c +DATA ·KoopConst+1336(SB)/8, $0x00000001abdbf2b2 + +// x^175168 mod p(x), x^175104 mod p(x) +DATA ·KoopConst+1344(SB)/8, $0x000000015580543a +DATA ·KoopConst+1352(SB)/8, $0x00000001a665a880 + +// x^174144 mod p(x), x^174080 mod p(x) +DATA ·KoopConst+1360(SB)/8, $0x00000000c7074f24 +DATA ·KoopConst+1368(SB)/8, $0x00000000c102c700 + +// x^173120 mod p(x), x^173056 mod p(x) +DATA ·KoopConst+1376(SB)/8, $0x00000000fa4112b0 +DATA ·KoopConst+1384(SB)/8, $0x00000000ee362a50 + +// x^172096 mod p(x), x^172032 mod p(x) +DATA ·KoopConst+1392(SB)/8, $0x00000000e786c13e +DATA ·KoopConst+1400(SB)/8, $0x0000000045f29038 + +// x^171072 mod p(x), x^171008 mod p(x) +DATA ·KoopConst+1408(SB)/8, $0x00000001e45e3694 +DATA ·KoopConst+1416(SB)/8, $0x0000000117b9ab5c + +// x^170048 mod p(x), x^169984 mod p(x) +DATA ·KoopConst+1424(SB)/8, $0x000000005423dd8c +DATA ·KoopConst+1432(SB)/8, $0x00000001115dff5e + +// x^169024 mod p(x), x^168960 mod p(x) +DATA ·KoopConst+1440(SB)/8, $0x00000001a1e67766 +DATA ·KoopConst+1448(SB)/8, $0x0000000117fad29c + +// x^168000 mod p(x), x^167936 mod p(x) +DATA ·KoopConst+1456(SB)/8, $0x0000000041a3f508 +DATA ·KoopConst+1464(SB)/8, $0x000000017de134e6 + +// x^166976 mod p(x), x^166912 mod p(x) +DATA ·KoopConst+1472(SB)/8, $0x000000003e792f7e +DATA ·KoopConst+1480(SB)/8, $0x00000000a2f5d19c + +// x^165952 mod p(x), x^165888 mod p(x) +DATA ·KoopConst+1488(SB)/8, $0x00000000c8948aaa +DATA ·KoopConst+1496(SB)/8, $0x00000000dee13658 + +// x^164928 mod p(x), x^164864 mod p(x) +DATA ·KoopConst+1504(SB)/8, $0x000000005d4ccb36 +DATA ·KoopConst+1512(SB)/8, $0x000000015355440c + +// x^163904 mod p(x), x^163840 mod p(x) +DATA ·KoopConst+1520(SB)/8, $0x00000000e92a78a2 +DATA ·KoopConst+1528(SB)/8, $0x0000000197a21778 + +// x^162880 mod p(x), x^162816 mod p(x) +DATA ·KoopConst+1536(SB)/8, $0x000000016ba67caa +DATA ·KoopConst+1544(SB)/8, $0x00000001a3835ec0 + +// x^161856 mod p(x), x^161792 mod p(x) +DATA ·KoopConst+1552(SB)/8, $0x000000004838afc6 +DATA ·KoopConst+1560(SB)/8, $0x0000000011f20912 + +// x^160832 mod p(x), x^160768 mod p(x) +DATA ·KoopConst+1568(SB)/8, $0x000000016644e308 +DATA ·KoopConst+1576(SB)/8, $0x00000001cce9d6cc + +// x^159808 mod p(x), x^159744 mod p(x) +DATA ·KoopConst+1584(SB)/8, $0x0000000037c22f42 +DATA ·KoopConst+1592(SB)/8, $0x0000000084d1e71c + +// x^158784 mod p(x), x^158720 mod p(x) +DATA ·KoopConst+1600(SB)/8, $0x00000001dedba6ca +DATA ·KoopConst+1608(SB)/8, $0x0000000197c2ad54 + +// x^157760 mod p(x), x^157696 mod p(x) +DATA ·KoopConst+1616(SB)/8, $0x0000000146a43500 +DATA ·KoopConst+1624(SB)/8, $0x000000018609261e + +// x^156736 mod p(x), x^156672 mod p(x) +DATA ·KoopConst+1632(SB)/8, $0x000000001cf762de +DATA ·KoopConst+1640(SB)/8, $0x00000000b4b4c224 + +// x^155712 mod p(x), x^155648 mod p(x) +DATA ·KoopConst+1648(SB)/8, $0x0000000022ff7eda +DATA ·KoopConst+1656(SB)/8, $0x0000000080817496 + +// x^154688 mod p(x), x^154624 mod p(x) +DATA ·KoopConst+1664(SB)/8, $0x00000001b6df625e +DATA ·KoopConst+1672(SB)/8, $0x00000001aefb473c + +// x^153664 mod p(x), x^153600 mod p(x) +DATA ·KoopConst+1680(SB)/8, $0x00000001cc99ab58 +DATA ·KoopConst+1688(SB)/8, $0x000000013f1aa474 + +// x^152640 mod p(x), x^152576 mod p(x) +DATA ·KoopConst+1696(SB)/8, $0x00000001c53f5ce2 +DATA ·KoopConst+1704(SB)/8, $0x000000010ca2c756 + +// x^151616 mod p(x), x^151552 mod p(x) +DATA ·KoopConst+1712(SB)/8, $0x0000000082a9c60e +DATA ·KoopConst+1720(SB)/8, $0x000000002c63533a + +// x^150592 mod p(x), x^150528 mod p(x) +DATA ·KoopConst+1728(SB)/8, $0x00000000ec78b570 +DATA ·KoopConst+1736(SB)/8, $0x00000001b7f2ad50 + +// x^149568 mod p(x), x^149504 mod p(x) +DATA ·KoopConst+1744(SB)/8, $0x00000001d3fe1e8e +DATA ·KoopConst+1752(SB)/8, $0x00000000acdf4c20 + +// x^148544 mod p(x), x^148480 mod p(x) +DATA ·KoopConst+1760(SB)/8, $0x000000007f9a7bde +DATA ·KoopConst+1768(SB)/8, $0x000000000bd29e8c + +// x^147520 mod p(x), x^147456 mod p(x) +DATA ·KoopConst+1776(SB)/8, $0x00000000e606f518 +DATA ·KoopConst+1784(SB)/8, $0x00000001eef6992e + +// x^146496 mod p(x), x^146432 mod p(x) +DATA ·KoopConst+1792(SB)/8, $0x000000008538cb96 +DATA ·KoopConst+1800(SB)/8, $0x00000000b01644e6 + +// x^145472 mod p(x), x^145408 mod p(x) +DATA ·KoopConst+1808(SB)/8, $0x0000000131d030b2 +DATA ·KoopConst+1816(SB)/8, $0x0000000059c51acc + +// x^144448 mod p(x), x^144384 mod p(x) +DATA ·KoopConst+1824(SB)/8, $0x00000000115a4d0e +DATA ·KoopConst+1832(SB)/8, $0x00000001a2849272 + +// x^143424 mod p(x), x^143360 mod p(x) +DATA ·KoopConst+1840(SB)/8, $0x00000000e8a5356e +DATA ·KoopConst+1848(SB)/8, $0x00000001a4e0b610 + +// x^142400 mod p(x), x^142336 mod p(x) +DATA ·KoopConst+1856(SB)/8, $0x0000000158d988be +DATA ·KoopConst+1864(SB)/8, $0x00000000084e81a6 + +// x^141376 mod p(x), x^141312 mod p(x) +DATA ·KoopConst+1872(SB)/8, $0x00000001240db498 +DATA ·KoopConst+1880(SB)/8, $0x00000001b71f1fd8 + +// x^140352 mod p(x), x^140288 mod p(x) +DATA ·KoopConst+1888(SB)/8, $0x000000009ce87826 +DATA ·KoopConst+1896(SB)/8, $0x000000017f7df380 + +// x^139328 mod p(x), x^139264 mod p(x) +DATA ·KoopConst+1904(SB)/8, $0x0000000021944aae +DATA ·KoopConst+1912(SB)/8, $0x00000001f7f4e190 + +// x^138304 mod p(x), x^138240 mod p(x) +DATA ·KoopConst+1920(SB)/8, $0x00000001cea3d67e +DATA ·KoopConst+1928(SB)/8, $0x0000000150220d86 + +// x^137280 mod p(x), x^137216 mod p(x) +DATA ·KoopConst+1936(SB)/8, $0x000000004434e926 +DATA ·KoopConst+1944(SB)/8, $0x00000001db7d2b2e + +// x^136256 mod p(x), x^136192 mod p(x) +DATA ·KoopConst+1952(SB)/8, $0x0000000011db8cbe +DATA ·KoopConst+1960(SB)/8, $0x00000000b6ba9668 + +// x^135232 mod p(x), x^135168 mod p(x) +DATA ·KoopConst+1968(SB)/8, $0x00000001f6e0b8dc +DATA ·KoopConst+1976(SB)/8, $0x0000000103fdcecc + +// x^134208 mod p(x), x^134144 mod p(x) +DATA ·KoopConst+1984(SB)/8, $0x00000001f163f4a0 +DATA ·KoopConst+1992(SB)/8, $0x0000000079816a22 + +// x^133184 mod p(x), x^133120 mod p(x) +DATA ·KoopConst+2000(SB)/8, $0x000000007b6cc60e +DATA ·KoopConst+2008(SB)/8, $0x0000000173483482 + +// x^132160 mod p(x), x^132096 mod p(x) +DATA ·KoopConst+2016(SB)/8, $0x000000000f26c82c +DATA ·KoopConst+2024(SB)/8, $0x00000000643ea4c0 + +// x^131136 mod p(x), x^131072 mod p(x) +DATA ·KoopConst+2032(SB)/8, $0x00000000b0acad80 +DATA ·KoopConst+2040(SB)/8, $0x00000000a64752d2 + +// x^130112 mod p(x), x^130048 mod p(x) +DATA ·KoopConst+2048(SB)/8, $0x000000013687e91c +DATA ·KoopConst+2056(SB)/8, $0x00000000ca98eb3a + +// x^129088 mod p(x), x^129024 mod p(x) +DATA ·KoopConst+2064(SB)/8, $0x000000006bac3a96 +DATA ·KoopConst+2072(SB)/8, $0x00000001ca6ac8f8 + +// x^128064 mod p(x), x^128000 mod p(x) +DATA ·KoopConst+2080(SB)/8, $0x00000001bf197d5c +DATA ·KoopConst+2088(SB)/8, $0x00000001c48e2e68 + +// x^127040 mod p(x), x^126976 mod p(x) +DATA ·KoopConst+2096(SB)/8, $0x00000000256e84f2 +DATA ·KoopConst+2104(SB)/8, $0x0000000070086782 + +// x^126016 mod p(x), x^125952 mod p(x) +DATA ·KoopConst+2112(SB)/8, $0x000000003eff0d16 +DATA ·KoopConst+2120(SB)/8, $0x00000000f763621c + +// x^124992 mod p(x), x^124928 mod p(x) +DATA ·KoopConst+2128(SB)/8, $0x00000001748e9fd2 +DATA ·KoopConst+2136(SB)/8, $0x00000000ba58646a + +// x^123968 mod p(x), x^123904 mod p(x) +DATA ·KoopConst+2144(SB)/8, $0x000000015bb85b42 +DATA ·KoopConst+2152(SB)/8, $0x0000000138e157d8 + +// x^122944 mod p(x), x^122880 mod p(x) +DATA ·KoopConst+2160(SB)/8, $0x0000000164d1a980 +DATA ·KoopConst+2168(SB)/8, $0x00000001bf0a09dc + +// x^121920 mod p(x), x^121856 mod p(x) +DATA ·KoopConst+2176(SB)/8, $0x000000001415c9f0 +DATA ·KoopConst+2184(SB)/8, $0x0000000098faf300 + +// x^120896 mod p(x), x^120832 mod p(x) +DATA ·KoopConst+2192(SB)/8, $0x0000000195ae2f48 +DATA ·KoopConst+2200(SB)/8, $0x00000001f872f2c6 + +// x^119872 mod p(x), x^119808 mod p(x) +DATA ·KoopConst+2208(SB)/8, $0x0000000059d1d81a +DATA ·KoopConst+2216(SB)/8, $0x00000000f92577be + +// x^118848 mod p(x), x^118784 mod p(x) +DATA ·KoopConst+2224(SB)/8, $0x00000001bf80257a +DATA ·KoopConst+2232(SB)/8, $0x00000001a4d975f4 + +// x^117824 mod p(x), x^117760 mod p(x) +DATA ·KoopConst+2240(SB)/8, $0x000000011e39bfce +DATA ·KoopConst+2248(SB)/8, $0x000000018b74eeca + +// x^116800 mod p(x), x^116736 mod p(x) +DATA ·KoopConst+2256(SB)/8, $0x00000001287a0456 +DATA ·KoopConst+2264(SB)/8, $0x00000000e8980404 + +// x^115776 mod p(x), x^115712 mod p(x) +DATA ·KoopConst+2272(SB)/8, $0x00000000a5eb589c +DATA ·KoopConst+2280(SB)/8, $0x0000000176ef2b74 + +// x^114752 mod p(x), x^114688 mod p(x) +DATA ·KoopConst+2288(SB)/8, $0x000000017d71c452 +DATA ·KoopConst+2296(SB)/8, $0x0000000063c85caa + +// x^113728 mod p(x), x^113664 mod p(x) +DATA ·KoopConst+2304(SB)/8, $0x00000000fa941f08 +DATA ·KoopConst+2312(SB)/8, $0x00000001708012cc + +// x^112704 mod p(x), x^112640 mod p(x) +DATA ·KoopConst+2320(SB)/8, $0x0000000064ea030e +DATA ·KoopConst+2328(SB)/8, $0x00000000474d58f6 + +// x^111680 mod p(x), x^111616 mod p(x) +DATA ·KoopConst+2336(SB)/8, $0x000000019b7cc7ba +DATA ·KoopConst+2344(SB)/8, $0x00000001c76085a6 + +// x^110656 mod p(x), x^110592 mod p(x) +DATA ·KoopConst+2352(SB)/8, $0x00000000225cb7ba +DATA ·KoopConst+2360(SB)/8, $0x000000018fb0681a + +// x^109632 mod p(x), x^109568 mod p(x) +DATA ·KoopConst+2368(SB)/8, $0x000000010ab3e1da +DATA ·KoopConst+2376(SB)/8, $0x00000001fcee1f16 + +// x^108608 mod p(x), x^108544 mod p(x) +DATA ·KoopConst+2384(SB)/8, $0x00000001ce5cc33e +DATA ·KoopConst+2392(SB)/8, $0x00000000cfbffb7c + +// x^107584 mod p(x), x^107520 mod p(x) +DATA ·KoopConst+2400(SB)/8, $0x000000005e980f6e +DATA ·KoopConst+2408(SB)/8, $0x000000017af8ee72 + +// x^106560 mod p(x), x^106496 mod p(x) +DATA ·KoopConst+2416(SB)/8, $0x00000000d3bf3f46 +DATA ·KoopConst+2424(SB)/8, $0x000000001c2ad3e2 + +// x^105536 mod p(x), x^105472 mod p(x) +DATA ·KoopConst+2432(SB)/8, $0x000000018d554ae0 +DATA ·KoopConst+2440(SB)/8, $0x00000000ee05450a + +// x^104512 mod p(x), x^104448 mod p(x) +DATA ·KoopConst+2448(SB)/8, $0x000000018e276eb0 +DATA ·KoopConst+2456(SB)/8, $0x000000000f7d5bac + +// x^103488 mod p(x), x^103424 mod p(x) +DATA ·KoopConst+2464(SB)/8, $0x000000001c0319ce +DATA ·KoopConst+2472(SB)/8, $0x00000001cb26e004 + +// x^102464 mod p(x), x^102400 mod p(x) +DATA ·KoopConst+2480(SB)/8, $0x00000001ca0c75ec +DATA ·KoopConst+2488(SB)/8, $0x00000001553314e2 + +// x^101440 mod p(x), x^101376 mod p(x) +DATA ·KoopConst+2496(SB)/8, $0x00000001fb075330 +DATA ·KoopConst+2504(SB)/8, $0x000000005729be2c + +// x^100416 mod p(x), x^100352 mod p(x) +DATA ·KoopConst+2512(SB)/8, $0x00000000677920e4 +DATA ·KoopConst+2520(SB)/8, $0x0000000192c4479c + +// x^99392 mod p(x), x^99328 mod p(x) +DATA ·KoopConst+2528(SB)/8, $0x00000000332247c8 +DATA ·KoopConst+2536(SB)/8, $0x0000000078d842b6 + +// x^98368 mod p(x), x^98304 mod p(x) +DATA ·KoopConst+2544(SB)/8, $0x00000000ef84fc6c +DATA ·KoopConst+2552(SB)/8, $0x0000000145ffa282 + +// x^97344 mod p(x), x^97280 mod p(x) +DATA ·KoopConst+2560(SB)/8, $0x0000000139ba7690 +DATA ·KoopConst+2568(SB)/8, $0x000000019d679bf4 + +// x^96320 mod p(x), x^96256 mod p(x) +DATA ·KoopConst+2576(SB)/8, $0x00000000029ef444 +DATA ·KoopConst+2584(SB)/8, $0x000000019412f7a0 + +// x^95296 mod p(x), x^95232 mod p(x) +DATA ·KoopConst+2592(SB)/8, $0x00000001d872048c +DATA ·KoopConst+2600(SB)/8, $0x00000000b28c5c96 + +// x^94272 mod p(x), x^94208 mod p(x) +DATA ·KoopConst+2608(SB)/8, $0x000000016535d70a +DATA ·KoopConst+2616(SB)/8, $0x00000000554bfd44 + +// x^93248 mod p(x), x^93184 mod p(x) +DATA ·KoopConst+2624(SB)/8, $0x00000000761dd222 +DATA ·KoopConst+2632(SB)/8, $0x00000000ce9cfa48 + +// x^92224 mod p(x), x^92160 mod p(x) +DATA ·KoopConst+2640(SB)/8, $0x00000001509a3a44 +DATA ·KoopConst+2648(SB)/8, $0x00000000a4702ab2 + +// x^91200 mod p(x), x^91136 mod p(x) +DATA ·KoopConst+2656(SB)/8, $0x000000007e7019f2 +DATA ·KoopConst+2664(SB)/8, $0x00000001c967fbee + +// x^90176 mod p(x), x^90112 mod p(x) +DATA ·KoopConst+2672(SB)/8, $0x00000000fb4c56ea +DATA ·KoopConst+2680(SB)/8, $0x00000000fd514b3e + +// x^89152 mod p(x), x^89088 mod p(x) +DATA ·KoopConst+2688(SB)/8, $0x000000012022e0ee +DATA ·KoopConst+2696(SB)/8, $0x00000001c0b6f95e + +// x^88128 mod p(x), x^88064 mod p(x) +DATA ·KoopConst+2704(SB)/8, $0x0000000004bc6054 +DATA ·KoopConst+2712(SB)/8, $0x0000000180e103ce + +// x^87104 mod p(x), x^87040 mod p(x) +DATA ·KoopConst+2720(SB)/8, $0x000000017a1a0030 +DATA ·KoopConst+2728(SB)/8, $0x00000001a1630916 + +// x^86080 mod p(x), x^86016 mod p(x) +DATA ·KoopConst+2736(SB)/8, $0x00000001c021a864 +DATA ·KoopConst+2744(SB)/8, $0x000000009a727fb2 + +// x^85056 mod p(x), x^84992 mod p(x) +DATA ·KoopConst+2752(SB)/8, $0x000000009c54421e +DATA ·KoopConst+2760(SB)/8, $0x00000000e83b081a + +// x^84032 mod p(x), x^83968 mod p(x) +DATA ·KoopConst+2768(SB)/8, $0x00000001b4e33e6a +DATA ·KoopConst+2776(SB)/8, $0x000000006b1a1f44 + +// x^83008 mod p(x), x^82944 mod p(x) +DATA ·KoopConst+2784(SB)/8, $0x000000015d615af0 +DATA ·KoopConst+2792(SB)/8, $0x00000000cf280394 + +// x^81984 mod p(x), x^81920 mod p(x) +DATA ·KoopConst+2800(SB)/8, $0x00000001914a3ba8 +DATA ·KoopConst+2808(SB)/8, $0x00000001154b8a9a + +// x^80960 mod p(x), x^80896 mod p(x) +DATA ·KoopConst+2816(SB)/8, $0x000000005f72ec44 +DATA ·KoopConst+2824(SB)/8, $0x0000000149ec63e2 + +// x^79936 mod p(x), x^79872 mod p(x) +DATA ·KoopConst+2832(SB)/8, $0x00000000a33746a8 +DATA ·KoopConst+2840(SB)/8, $0x000000018ef902c4 + +// x^78912 mod p(x), x^78848 mod p(x) +DATA ·KoopConst+2848(SB)/8, $0x00000001c91e90d4 +DATA ·KoopConst+2856(SB)/8, $0x0000000069addb88 + +// x^77888 mod p(x), x^77824 mod p(x) +DATA ·KoopConst+2864(SB)/8, $0x00000001052eb05e +DATA ·KoopConst+2872(SB)/8, $0x00000000e90a29ae + +// x^76864 mod p(x), x^76800 mod p(x) +DATA ·KoopConst+2880(SB)/8, $0x000000006a32f754 +DATA ·KoopConst+2888(SB)/8, $0x00000000c53641ae + +// x^75840 mod p(x), x^75776 mod p(x) +DATA ·KoopConst+2896(SB)/8, $0x00000001ecbd6436 +DATA ·KoopConst+2904(SB)/8, $0x00000000a17c3796 + +// x^74816 mod p(x), x^74752 mod p(x) +DATA ·KoopConst+2912(SB)/8, $0x000000000fd3f93a +DATA ·KoopConst+2920(SB)/8, $0x000000015307a62c + +// x^73792 mod p(x), x^73728 mod p(x) +DATA ·KoopConst+2928(SB)/8, $0x00000001686a4c24 +DATA ·KoopConst+2936(SB)/8, $0x000000002f94bbda + +// x^72768 mod p(x), x^72704 mod p(x) +DATA ·KoopConst+2944(SB)/8, $0x00000001e40afca0 +DATA ·KoopConst+2952(SB)/8, $0x0000000072c8b5e6 + +// x^71744 mod p(x), x^71680 mod p(x) +DATA ·KoopConst+2960(SB)/8, $0x000000012779a2b8 +DATA ·KoopConst+2968(SB)/8, $0x00000000f09b7424 + +// x^70720 mod p(x), x^70656 mod p(x) +DATA ·KoopConst+2976(SB)/8, $0x00000000dcdaeb9e +DATA ·KoopConst+2984(SB)/8, $0x00000001c57de3da + +// x^69696 mod p(x), x^69632 mod p(x) +DATA ·KoopConst+2992(SB)/8, $0x00000001674f7a2a +DATA ·KoopConst+3000(SB)/8, $0x000000013922b30e + +// x^68672 mod p(x), x^68608 mod p(x) +DATA ·KoopConst+3008(SB)/8, $0x00000000dcb9e846 +DATA ·KoopConst+3016(SB)/8, $0x000000008759a6c2 + +// x^67648 mod p(x), x^67584 mod p(x) +DATA ·KoopConst+3024(SB)/8, $0x00000000ea9a6af6 +DATA ·KoopConst+3032(SB)/8, $0x00000000545ae424 + +// x^66624 mod p(x), x^66560 mod p(x) +DATA ·KoopConst+3040(SB)/8, $0x000000006d1f7a74 +DATA ·KoopConst+3048(SB)/8, $0x00000001e0cbafd2 + +// x^65600 mod p(x), x^65536 mod p(x) +DATA ·KoopConst+3056(SB)/8, $0x000000006add215e +DATA ·KoopConst+3064(SB)/8, $0x0000000018360c04 + +// x^64576 mod p(x), x^64512 mod p(x) +DATA ·KoopConst+3072(SB)/8, $0x000000010a9ee4b0 +DATA ·KoopConst+3080(SB)/8, $0x00000000941dc432 + +// x^63552 mod p(x), x^63488 mod p(x) +DATA ·KoopConst+3088(SB)/8, $0x00000000304c48d2 +DATA ·KoopConst+3096(SB)/8, $0x0000000004d3566e + +// x^62528 mod p(x), x^62464 mod p(x) +DATA ·KoopConst+3104(SB)/8, $0x0000000163d0e672 +DATA ·KoopConst+3112(SB)/8, $0x0000000096aed14e + +// x^61504 mod p(x), x^61440 mod p(x) +DATA ·KoopConst+3120(SB)/8, $0x0000000010049166 +DATA ·KoopConst+3128(SB)/8, $0x0000000087c13618 + +// x^60480 mod p(x), x^60416 mod p(x) +DATA ·KoopConst+3136(SB)/8, $0x00000001d3913e34 +DATA ·KoopConst+3144(SB)/8, $0x00000001d52f7b0c + +// x^59456 mod p(x), x^59392 mod p(x) +DATA ·KoopConst+3152(SB)/8, $0x00000001e392d54a +DATA ·KoopConst+3160(SB)/8, $0x000000000182058e + +// x^58432 mod p(x), x^58368 mod p(x) +DATA ·KoopConst+3168(SB)/8, $0x0000000173f2704a +DATA ·KoopConst+3176(SB)/8, $0x00000001ed73aa02 + +// x^57408 mod p(x), x^57344 mod p(x) +DATA ·KoopConst+3184(SB)/8, $0x000000019112b480 +DATA ·KoopConst+3192(SB)/8, $0x000000002721a82e + +// x^56384 mod p(x), x^56320 mod p(x) +DATA ·KoopConst+3200(SB)/8, $0x0000000093d295d6 +DATA ·KoopConst+3208(SB)/8, $0x000000012ca83da2 + +// x^55360 mod p(x), x^55296 mod p(x) +DATA ·KoopConst+3216(SB)/8, $0x0000000114e37f44 +DATA ·KoopConst+3224(SB)/8, $0x00000000da358698 + +// x^54336 mod p(x), x^54272 mod p(x) +DATA ·KoopConst+3232(SB)/8, $0x00000000fcfebc86 +DATA ·KoopConst+3240(SB)/8, $0x0000000011fad322 + +// x^53312 mod p(x), x^53248 mod p(x) +DATA ·KoopConst+3248(SB)/8, $0x00000000834c48d6 +DATA ·KoopConst+3256(SB)/8, $0x000000012b25025c + +// x^52288 mod p(x), x^52224 mod p(x) +DATA ·KoopConst+3264(SB)/8, $0x000000017b909372 +DATA ·KoopConst+3272(SB)/8, $0x000000001290cd24 + +// x^51264 mod p(x), x^51200 mod p(x) +DATA ·KoopConst+3280(SB)/8, $0x000000010156b9ac +DATA ·KoopConst+3288(SB)/8, $0x000000016edd0b06 + +// x^50240 mod p(x), x^50176 mod p(x) +DATA ·KoopConst+3296(SB)/8, $0x0000000113a82fa8 +DATA ·KoopConst+3304(SB)/8, $0x00000000c08e222a + +// x^49216 mod p(x), x^49152 mod p(x) +DATA ·KoopConst+3312(SB)/8, $0x0000000182dacb74 +DATA ·KoopConst+3320(SB)/8, $0x00000000cfb4d10e + +// x^48192 mod p(x), x^48128 mod p(x) +DATA ·KoopConst+3328(SB)/8, $0x000000010210dc40 +DATA ·KoopConst+3336(SB)/8, $0x000000013e156ece + +// x^47168 mod p(x), x^47104 mod p(x) +DATA ·KoopConst+3344(SB)/8, $0x000000008ab5ed20 +DATA ·KoopConst+3352(SB)/8, $0x00000000f12d89f8 + +// x^46144 mod p(x), x^46080 mod p(x) +DATA ·KoopConst+3360(SB)/8, $0x00000000810386fa +DATA ·KoopConst+3368(SB)/8, $0x00000001fce3337c + +// x^45120 mod p(x), x^45056 mod p(x) +DATA ·KoopConst+3376(SB)/8, $0x000000011dce2fe2 +DATA ·KoopConst+3384(SB)/8, $0x00000001c4bf3514 + +// x^44096 mod p(x), x^44032 mod p(x) +DATA ·KoopConst+3392(SB)/8, $0x000000004bb0a390 +DATA ·KoopConst+3400(SB)/8, $0x00000001ae67c492 + +// x^43072 mod p(x), x^43008 mod p(x) +DATA ·KoopConst+3408(SB)/8, $0x00000000028d486a +DATA ·KoopConst+3416(SB)/8, $0x00000000302af704 + +// x^42048 mod p(x), x^41984 mod p(x) +DATA ·KoopConst+3424(SB)/8, $0x000000010e4d63fe +DATA ·KoopConst+3432(SB)/8, $0x00000001e375b250 + +// x^41024 mod p(x), x^40960 mod p(x) +DATA ·KoopConst+3440(SB)/8, $0x000000014fd6f458 +DATA ·KoopConst+3448(SB)/8, $0x00000001678b58c0 + +// x^40000 mod p(x), x^39936 mod p(x) +DATA ·KoopConst+3456(SB)/8, $0x00000000db7a83a2 +DATA ·KoopConst+3464(SB)/8, $0x0000000065103c1e + +// x^38976 mod p(x), x^38912 mod p(x) +DATA ·KoopConst+3472(SB)/8, $0x000000016cf9fa3c +DATA ·KoopConst+3480(SB)/8, $0x000000000ccd28ca + +// x^37952 mod p(x), x^37888 mod p(x) +DATA ·KoopConst+3488(SB)/8, $0x000000016bb33912 +DATA ·KoopConst+3496(SB)/8, $0x0000000059c177d4 + +// x^36928 mod p(x), x^36864 mod p(x) +DATA ·KoopConst+3504(SB)/8, $0x0000000135bda8bc +DATA ·KoopConst+3512(SB)/8, $0x00000001d162f83a + +// x^35904 mod p(x), x^35840 mod p(x) +DATA ·KoopConst+3520(SB)/8, $0x000000004e8c6b76 +DATA ·KoopConst+3528(SB)/8, $0x00000001efc0230c + +// x^34880 mod p(x), x^34816 mod p(x) +DATA ·KoopConst+3536(SB)/8, $0x00000000e17cb750 +DATA ·KoopConst+3544(SB)/8, $0x00000001a2a2e2d2 + +// x^33856 mod p(x), x^33792 mod p(x) +DATA ·KoopConst+3552(SB)/8, $0x000000010e8bb9cc +DATA ·KoopConst+3560(SB)/8, $0x00000001145c9dc2 + +// x^32832 mod p(x), x^32768 mod p(x) +DATA ·KoopConst+3568(SB)/8, $0x00000001859d1cae +DATA ·KoopConst+3576(SB)/8, $0x00000000949e4a48 + +// x^31808 mod p(x), x^31744 mod p(x) +DATA ·KoopConst+3584(SB)/8, $0x0000000167802bbe +DATA ·KoopConst+3592(SB)/8, $0x0000000128beecbc + +// x^30784 mod p(x), x^30720 mod p(x) +DATA ·KoopConst+3600(SB)/8, $0x0000000086f5219c +DATA ·KoopConst+3608(SB)/8, $0x00000001ffc96ae4 + +// x^29760 mod p(x), x^29696 mod p(x) +DATA ·KoopConst+3616(SB)/8, $0x00000001349a4faa +DATA ·KoopConst+3624(SB)/8, $0x00000001ba81e0aa + +// x^28736 mod p(x), x^28672 mod p(x) +DATA ·KoopConst+3632(SB)/8, $0x000000007da3353e +DATA ·KoopConst+3640(SB)/8, $0x0000000104d7df14 + +// x^27712 mod p(x), x^27648 mod p(x) +DATA ·KoopConst+3648(SB)/8, $0x00000000440fba4e +DATA ·KoopConst+3656(SB)/8, $0x00000001c2ff8518 + +// x^26688 mod p(x), x^26624 mod p(x) +DATA ·KoopConst+3664(SB)/8, $0x00000000507aba70 +DATA ·KoopConst+3672(SB)/8, $0x00000000ba6d4708 + +// x^25664 mod p(x), x^25600 mod p(x) +DATA ·KoopConst+3680(SB)/8, $0x0000000015b578b6 +DATA ·KoopConst+3688(SB)/8, $0x00000001d49d4bba + +// x^24640 mod p(x), x^24576 mod p(x) +DATA ·KoopConst+3696(SB)/8, $0x0000000141633fb2 +DATA ·KoopConst+3704(SB)/8, $0x00000000d21247e6 + +// x^23616 mod p(x), x^23552 mod p(x) +DATA ·KoopConst+3712(SB)/8, $0x0000000178712680 +DATA ·KoopConst+3720(SB)/8, $0x0000000063b4004a + +// x^22592 mod p(x), x^22528 mod p(x) +DATA ·KoopConst+3728(SB)/8, $0x000000001404c194 +DATA ·KoopConst+3736(SB)/8, $0x0000000094f55d2c + +// x^21568 mod p(x), x^21504 mod p(x) +DATA ·KoopConst+3744(SB)/8, $0x00000000469dbe46 +DATA ·KoopConst+3752(SB)/8, $0x00000001ca68fe74 + +// x^20544 mod p(x), x^20480 mod p(x) +DATA ·KoopConst+3760(SB)/8, $0x00000000fb093fd8 +DATA ·KoopConst+3768(SB)/8, $0x00000001fd7d1b4c + +// x^19520 mod p(x), x^19456 mod p(x) +DATA ·KoopConst+3776(SB)/8, $0x00000000767a2bfe +DATA ·KoopConst+3784(SB)/8, $0x0000000055982d0c + +// x^18496 mod p(x), x^18432 mod p(x) +DATA ·KoopConst+3792(SB)/8, $0x00000001344e22bc +DATA ·KoopConst+3800(SB)/8, $0x00000000221553a6 + +// x^17472 mod p(x), x^17408 mod p(x) +DATA ·KoopConst+3808(SB)/8, $0x0000000161cd9978 +DATA ·KoopConst+3816(SB)/8, $0x000000013d9a153a + +// x^16448 mod p(x), x^16384 mod p(x) +DATA ·KoopConst+3824(SB)/8, $0x00000001d702e906 +DATA ·KoopConst+3832(SB)/8, $0x00000001cd108b3c + +// x^15424 mod p(x), x^15360 mod p(x) +DATA ·KoopConst+3840(SB)/8, $0x00000001c7db9908 +DATA ·KoopConst+3848(SB)/8, $0x00000001d0af0f4a + +// x^14400 mod p(x), x^14336 mod p(x) +DATA ·KoopConst+3856(SB)/8, $0x00000001665d025c +DATA ·KoopConst+3864(SB)/8, $0x00000001196cf0ec + +// x^13376 mod p(x), x^13312 mod p(x) +DATA ·KoopConst+3872(SB)/8, $0x000000012df97c0e +DATA ·KoopConst+3880(SB)/8, $0x00000001c88c9704 + +// x^12352 mod p(x), x^12288 mod p(x) +DATA ·KoopConst+3888(SB)/8, $0x000000006fed84da +DATA ·KoopConst+3896(SB)/8, $0x000000002013d300 + +// x^11328 mod p(x), x^11264 mod p(x) +DATA ·KoopConst+3904(SB)/8, $0x00000000b094146e +DATA ·KoopConst+3912(SB)/8, $0x00000001c458501e + +// x^10304 mod p(x), x^10240 mod p(x) +DATA ·KoopConst+3920(SB)/8, $0x00000001ceb518a6 +DATA ·KoopConst+3928(SB)/8, $0x000000003ce14802 + +// x^9280 mod p(x), x^9216 mod p(x) +DATA ·KoopConst+3936(SB)/8, $0x000000011f16db0a +DATA ·KoopConst+3944(SB)/8, $0x00000000bb72bb98 + +// x^8256 mod p(x), x^8192 mod p(x) +DATA ·KoopConst+3952(SB)/8, $0x00000001d4aa130e +DATA ·KoopConst+3960(SB)/8, $0x00000000fb9aeaba + +// x^7232 mod p(x), x^7168 mod p(x) +DATA ·KoopConst+3968(SB)/8, $0x00000001991f01d2 +DATA ·KoopConst+3976(SB)/8, $0x000000000131f5e6 + +// x^6208 mod p(x), x^6144 mod p(x) +DATA ·KoopConst+3984(SB)/8, $0x000000006bd58b4c +DATA ·KoopConst+3992(SB)/8, $0x0000000089d5799a + +// x^5184 mod p(x), x^5120 mod p(x) +DATA ·KoopConst+4000(SB)/8, $0x000000007272c166 +DATA ·KoopConst+4008(SB)/8, $0x00000000474c43b0 + +// x^4160 mod p(x), x^4096 mod p(x) +DATA ·KoopConst+4016(SB)/8, $0x000000013974e6f8 +DATA ·KoopConst+4024(SB)/8, $0x00000001db991f34 + +// x^3136 mod p(x), x^3072 mod p(x) +DATA ·KoopConst+4032(SB)/8, $0x000000000bd6e03c +DATA ·KoopConst+4040(SB)/8, $0x000000004b1bfd00 + +// x^2112 mod p(x), x^2048 mod p(x) +DATA ·KoopConst+4048(SB)/8, $0x000000005988c652 +DATA ·KoopConst+4056(SB)/8, $0x000000004036b796 + +// x^1088 mod p(x), x^1024 mod p(x) +DATA ·KoopConst+4064(SB)/8, $0x00000000129ef036 +DATA ·KoopConst+4072(SB)/8, $0x000000000c5ec3d4 + +// x^2048 mod p(x), x^2016 mod p(x), x^1984 mod p(x), x^1952 mod p(x) +DATA ·KoopConst+4080(SB)/8, $0xd6f94847201b5bcb +DATA ·KoopConst+4088(SB)/8, $0x1efc02e79571e892 + +// x^1920 mod p(x), x^1888 mod p(x), x^1856 mod p(x), x^1824 mod p(x) +DATA ·KoopConst+4096(SB)/8, $0xce08adcc294c1393 +DATA ·KoopConst+4104(SB)/8, $0x0b269b5c5ab5f161 + +// x^1792 mod p(x), x^1760 mod p(x), x^1728 mod p(x), x^1696 mod p(x) +DATA ·KoopConst+4112(SB)/8, $0x17315505e4201e72 +DATA ·KoopConst+4120(SB)/8, $0x2e841f4784acf3e9 + +// x^1664 mod p(x), x^1632 mod p(x), x^1600 mod p(x), x^1568 mod p(x) +DATA ·KoopConst+4128(SB)/8, $0x37cfc3a67cc667e3 +DATA ·KoopConst+4136(SB)/8, $0x7020425856bc424b + +// x^1536 mod p(x), x^1504 mod p(x), x^1472 mod p(x), x^1440 mod p(x) +DATA ·KoopConst+4144(SB)/8, $0x8e2fa3369218d2c3 +DATA ·KoopConst+4152(SB)/8, $0xdf81bf923f7c6ef1 + +// x^1408 mod p(x), x^1376 mod p(x), x^1344 mod p(x), x^1312 mod p(x) +DATA ·KoopConst+4160(SB)/8, $0x5ce20d2d39ed1981 +DATA ·KoopConst+4168(SB)/8, $0x9d0898a0af5ddc43 + +// x^1280 mod p(x), x^1248 mod p(x), x^1216 mod p(x), x^1184 mod p(x) +DATA ·KoopConst+4176(SB)/8, $0x6f7f4546ca081e03 +DATA ·KoopConst+4184(SB)/8, $0x4992836903fda047 + +// x^1152 mod p(x), x^1120 mod p(x), x^1088 mod p(x), x^1056 mod p(x) +DATA ·KoopConst+4192(SB)/8, $0xfd4f413b9bf11d68 +DATA ·KoopConst+4200(SB)/8, $0xf4ddf452094f781b + +// x^1024 mod p(x), x^992 mod p(x), x^960 mod p(x), x^928 mod p(x) +DATA ·KoopConst+4208(SB)/8, $0x11d84204062f61ea +DATA ·KoopConst+4216(SB)/8, $0x9487f1e51f3588cf + +// x^896 mod p(x), x^864 mod p(x), x^832 mod p(x), x^800 mod p(x) +DATA ·KoopConst+4224(SB)/8, $0xfaedf111abf58a1f +DATA ·KoopConst+4232(SB)/8, $0x31da2c22b1384ec9 + +// x^768 mod p(x), x^736 mod p(x), x^704 mod p(x), x^672 mod p(x) +DATA ·KoopConst+4240(SB)/8, $0x0246b541e8f81b22 +DATA ·KoopConst+4248(SB)/8, $0xc857ede58a42eb47 + +// x^640 mod p(x), x^608 mod p(x), x^576 mod p(x), x^544 mod p(x) +DATA ·KoopConst+4256(SB)/8, $0xd4dbfa9b92b0372e +DATA ·KoopConst+4264(SB)/8, $0xe0354c0b2cd1c09a + +// x^512 mod p(x), x^480 mod p(x), x^448 mod p(x), x^416 mod p(x) +DATA ·KoopConst+4272(SB)/8, $0x5f36c79cfc4417ec +DATA ·KoopConst+4280(SB)/8, $0x4b92cf8d54b8f25b + +// x^384 mod p(x), x^352 mod p(x), x^320 mod p(x), x^288 mod p(x) +DATA ·KoopConst+4288(SB)/8, $0xdad234918345041e +DATA ·KoopConst+4296(SB)/8, $0x4e44c81828229301 + +// x^256 mod p(x), x^224 mod p(x), x^192 mod p(x), x^160 mod p(x) +DATA ·KoopConst+4304(SB)/8, $0x56fd28cc8e02f1d0 +DATA ·KoopConst+4312(SB)/8, $0x3da5e43c8ee9ee84 + +// x^128 mod p(x), x^96 mod p(x), x^64 mod p(x), x^32 mod p(x) +DATA ·KoopConst+4320(SB)/8, $0xa583017cdfcb9f08 +DATA ·KoopConst+4328(SB)/8, $0xeb31d82e0c62ab26 + +GLOBL ·KoopConst(SB), RODATA, $4336 + +// Barrett constant m - (4^32)/n +DATA ·KoopBarConst(SB)/8, $0x0000000017d232cd +DATA ·KoopBarConst+8(SB)/8, $0x0000000000000000 +DATA ·KoopBarConst+16(SB)/8, $0x00000001d663b05d +DATA ·KoopBarConst+24(SB)/8, $0x0000000000000000 +GLOBL ·KoopBarConst(SB), RODATA, $32 diff --git a/vendor/github.com/klauspost/crc32/gen.go b/vendor/github.com/klauspost/crc32/gen.go new file mode 100644 index 000000000000..fb3040a7dca6 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/gen.go @@ -0,0 +1,7 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen_const_ppc64le.go + +package crc32 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/minio/crc64nvme/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt rename to vendor/github.com/minio/crc64nvme/LICENSE diff --git a/vendor/github.com/minio/crc64nvme/README.md b/vendor/github.com/minio/crc64nvme/README.md new file mode 100644 index 000000000000..977dfcc88188 --- /dev/null +++ b/vendor/github.com/minio/crc64nvme/README.md @@ -0,0 +1,20 @@ + +## crc64nvme + +This Golang package calculates CRC64 checksums using carryless-multiplication accelerated with SIMD instructions for both ARM and x86. It is based on the NVME polynomial as specified in the [NVM Express® NVM Command Set Specification](https://nvmexpress.org/wp-content/uploads/NVM-Express-NVM-Command-Set-Specification-1.0d-2023.12.28-Ratified.pdf). + +The code is based on the [crc64fast-nvme](https://github.com/awesomized/crc64fast-nvme.git) package in Rust and is released under the Apache 2.0 license. + +For more background on the exact technique used, see this [Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction](https://web.archive.org/web/20131224125630/https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf) paper. + +### Performance + +To follow. + +### Requirements + +All Go versions >= 1.22 are supported. + +### Contributing + +Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/crc64nvme/crc64.go b/vendor/github.com/minio/crc64nvme/crc64.go new file mode 100644 index 000000000000..ca34a48e09e9 --- /dev/null +++ b/vendor/github.com/minio/crc64nvme/crc64.go @@ -0,0 +1,185 @@ +// Copyright (c) 2025 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +// Package crc64nvme implements the 64-bit cyclic redundancy check with NVME polynomial. +package crc64nvme + +import ( + "encoding/binary" + "errors" + "hash" + "sync" + "unsafe" +) + +const ( + // The size of a CRC-64 checksum in bytes. + Size = 8 + + // The NVME polynoimial (reversed, as used by Go) + NVME = 0x9a6c9329ac4bc9b5 +) + +var ( + // precalculated table. + nvmeTable = makeTable(NVME) +) + +// table is a 256-word table representing the polynomial for efficient processing. +type table [256]uint64 + +var ( + slicing8TablesBuildOnce sync.Once + slicing8TableNVME *[8]table +) + +func buildSlicing8TablesOnce() { + slicing8TablesBuildOnce.Do(buildSlicing8Tables) +} + +func buildSlicing8Tables() { + slicing8TableNVME = makeSlicingBy8Table(makeTable(NVME)) +} + +func makeTable(poly uint64) *table { + t := new(table) + for i := 0; i < 256; i++ { + crc := uint64(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } + return t +} + +func makeSlicingBy8Table(t *table) *[8]table { + var helperTable [8]table + helperTable[0] = *t + for i := 0; i < 256; i++ { + crc := t[i] + for j := 1; j < 8; j++ { + crc = t[crc&0xff] ^ (crc >> 8) + helperTable[j][i] = crc + } + } + return &helperTable +} + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint64 +} + +// New creates a new hash.Hash64 computing the CRC-64 checksum using the +// NVME polynomial. Its Sum method will lay the +// value out in big-endian byte order. The returned Hash64 also +// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to +// marshal and unmarshal the internal state of the hash. +func New() hash.Hash64 { return &digest{0} } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +const ( + magic = "crc\x02" + marshaledSize = len(magic) + 8 + 8 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = binary.BigEndian.AppendUint64(b, tableSum) + b = binary.BigEndian.AppendUint64(b, d.crc) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("hash/crc64: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("hash/crc64: invalid hash state size") + } + if tableSum != binary.BigEndian.Uint64(b[4:]) { + return errors.New("hash/crc64: tables do not match") + } + d.crc = binary.BigEndian.Uint64(b[12:]) + return nil +} + +func update(crc uint64, p []byte) uint64 { + if hasAsm && len(p) > 127 { + ptr := unsafe.Pointer(&p[0]) + if align := (uintptr(ptr)+15)&^0xf - uintptr(ptr); align > 0 { + // Align to 16-byte boundary. + crc = update(crc, p[:align]) + p = p[align:] + } + runs := len(p) / 128 + if hasAsm512 && runs >= 8 { + // Use 512-bit wide instructions for >= 1KB. + crc = updateAsm512(crc, p[:128*runs]) + } else { + crc = updateAsm(crc, p[:128*runs]) + } + return update(crc, p[128*runs:]) + } + + buildSlicing8TablesOnce() + crc = ^crc + // table comparison is somewhat expensive, so avoid it for small sizes + if len(p) >= 64 { + var helperTable = slicing8TableNVME + // Update using slicing-by-8 + for len(p) > 8 { + crc ^= binary.LittleEndian.Uint64(p) + crc = helperTable[7][crc&0xff] ^ + helperTable[6][(crc>>8)&0xff] ^ + helperTable[5][(crc>>16)&0xff] ^ + helperTable[4][(crc>>24)&0xff] ^ + helperTable[3][(crc>>32)&0xff] ^ + helperTable[2][(crc>>40)&0xff] ^ + helperTable[1][(crc>>48)&0xff] ^ + helperTable[0][crc>>56] + p = p[8:] + } + } + // For reminders or small sizes + for _, v := range p { + crc = nvmeTable[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint64, p []byte) uint64 { + return update(crc, p) +} + +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = update(d.crc, p) + return len(p), nil +} + +func (d *digest) Sum64() uint64 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-64 checksum of data +// using the NVME polynomial. +func Checksum(data []byte) uint64 { return update(0, data) } + +// ISO tablesum of NVME poly +const tableSum = 0x8ddd9ee4402c7163 diff --git a/vendor/github.com/minio/crc64nvme/crc64_amd64.go b/vendor/github.com/minio/crc64nvme/crc64_amd64.go new file mode 100644 index 000000000000..c741591a6191 --- /dev/null +++ b/vendor/github.com/minio/crc64nvme/crc64_amd64.go @@ -0,0 +1,17 @@ +// Copyright (c) 2025 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +//go:build !noasm && !appengine && !gccgo + +package crc64nvme + +import ( + "github.com/klauspost/cpuid/v2" +) + +var hasAsm = cpuid.CPU.Supports(cpuid.SSE2, cpuid.CLMUL, cpuid.SSE4) +var hasAsm512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.VPCLMULQDQ, cpuid.AVX512VL, cpuid.CLMUL) + +func updateAsm(crc uint64, p []byte) (checksum uint64) +func updateAsm512(crc uint64, p []byte) (checksum uint64) diff --git a/vendor/github.com/minio/crc64nvme/crc64_amd64.s b/vendor/github.com/minio/crc64nvme/crc64_amd64.s new file mode 100644 index 000000000000..acfea6a151b2 --- /dev/null +++ b/vendor/github.com/minio/crc64nvme/crc64_amd64.s @@ -0,0 +1,307 @@ +// Copyright (c) 2025 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +//go:build !noasm && !appengine && !gccgo + +#include "textflag.h" + +TEXT ·updateAsm(SB), $0-40 + MOVQ crc+0(FP), AX // checksum + MOVQ p_base+8(FP), SI // start pointer + MOVQ p_len+16(FP), CX // length of buffer + NOTQ AX + SHRQ $7, CX + CMPQ CX, $1 + JLT skip128 + + VMOVDQA 0x00(SI), X0 + VMOVDQA 0x10(SI), X1 + VMOVDQA 0x20(SI), X2 + VMOVDQA 0x30(SI), X3 + VMOVDQA 0x40(SI), X4 + VMOVDQA 0x50(SI), X5 + VMOVDQA 0x60(SI), X6 + VMOVDQA 0x70(SI), X7 + MOVQ AX, X8 + PXOR X8, X0 + CMPQ CX, $1 + JE tail128 + + MOVQ $0xa1ca681e733f9c40, AX + MOVQ AX, X8 + MOVQ $0x5f852fb61e8d92dc, AX + PINSRQ $0x1, AX, X9 + +loop128: + ADDQ $128, SI + SUBQ $1, CX + VMOVDQA X0, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X0 + PXOR X10, X0 + PXOR 0(SI), X0 + VMOVDQA X1, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X1 + PXOR X10, X1 + PXOR 0x10(SI), X1 + VMOVDQA X2, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X2 + PXOR X10, X2 + PXOR 0x20(SI), X2 + VMOVDQA X3, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X3 + PXOR X10, X3 + PXOR 0x30(SI), X3 + VMOVDQA X4, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X4 + PXOR X10, X4 + PXOR 0x40(SI), X4 + VMOVDQA X5, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X5 + PXOR X10, X5 + PXOR 0x50(SI), X5 + VMOVDQA X6, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X6 + PXOR X10, X6 + PXOR 0x60(SI), X6 + VMOVDQA X7, X10 + PCLMULQDQ $0x00, X8, X10 + PCLMULQDQ $0x11, X9, X7 + PXOR X10, X7 + PXOR 0x70(SI), X7 + CMPQ CX, $1 + JGT loop128 + +tail128: + MOVQ $0xd083dd594d96319d, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X0, X11 + MOVQ $0x946588403d4adcbc, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X0 + PXOR X11, X7 + PXOR X0, X7 + MOVQ $0x3c255f5ebc414423, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X1, X11 + MOVQ $0x34f5a24e22d66e90, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X1 + PXOR X11, X1 + PXOR X7, X1 + MOVQ $0x7b0ab10dd0f809fe, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X2, X11 + MOVQ $0x03363823e6e791e5, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X2 + PXOR X11, X2 + PXOR X1, X2 + MOVQ $0x0c32cdb31e18a84a, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X3, X11 + MOVQ $0x62242240ace5045a, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X3 + PXOR X11, X3 + PXOR X2, X3 + MOVQ $0xbdd7ac0ee1a4a0f0, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X4, X11 + MOVQ $0xa3ffdc1fe8e82a8b, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X4 + PXOR X11, X4 + PXOR X3, X4 + MOVQ $0xb0bc2e589204f500, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X5, X11 + MOVQ $0xe1e0bb9d45d7a44c, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X5 + PXOR X11, X5 + PXOR X4, X5 + MOVQ $0xeadc41fd2ba3d420, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X6, X11 + MOVQ $0x21e9761e252621ac, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X6 + PXOR X11, X6 + PXOR X5, X6 + MOVQ AX, X5 + PCLMULQDQ $0x00, X6, X5 + PSHUFD $0xee, X6, X6 + PXOR X5, X6 + MOVQ $0x27ecfa329aef9f77, AX + MOVQ AX, X4 + PCLMULQDQ $0x00, X4, X6 + PEXTRQ $0, X6, BX + MOVQ $0x34d926535897936b, AX + MOVQ AX, X4 + PCLMULQDQ $0x00, X4, X6 + PXOR X5, X6 + PEXTRQ $1, X6, AX + XORQ BX, AX + +skip128: + NOTQ AX + MOVQ AX, checksum+32(FP) + RET + +// Constants, pre-splatted. +DATA ·asmConstantsPoly<>+0x00(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x08(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x10(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x18(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x20(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x28(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x30(SB)/8, $0xa1ca681e733f9c40 +DATA ·asmConstantsPoly<>+0x38(SB)/8, $0 +// Upper +DATA ·asmConstantsPoly<>+0x40(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x48(SB)/8, $0x5f852fb61e8d92dc +DATA ·asmConstantsPoly<>+0x50(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x58(SB)/8, $0x5f852fb61e8d92dc +DATA ·asmConstantsPoly<>+0x60(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x68(SB)/8, $0x5f852fb61e8d92dc +DATA ·asmConstantsPoly<>+0x70(SB)/8, $0 +DATA ·asmConstantsPoly<>+0x78(SB)/8, $0x5f852fb61e8d92dc +GLOBL ·asmConstantsPoly<>(SB), (NOPTR+RODATA), $128 + +TEXT ·updateAsm512(SB), $0-40 + MOVQ crc+0(FP), AX // checksum + MOVQ p_base+8(FP), SI // start pointer + MOVQ p_len+16(FP), CX // length of buffer + NOTQ AX + SHRQ $7, CX + CMPQ CX, $1 + VPXORQ Z8, Z8, Z8 // Initialize ZMM8 to zero + JLT skip128 + + VMOVDQU64 0x00(SI), Z0 + VMOVDQU64 0x40(SI), Z4 + MOVQ $·asmConstantsPoly<>(SB), BX + VMOVQ AX, X8 + + // XOR initialization value into lower 64 bits of ZMM0 + VPXORQ Z8, Z0, Z0 + CMPQ CX, $1 + JE tail128 + + VMOVDQU64 0(BX), Z8 + VMOVDQU64 64(BX), Z9 + + PCALIGN $16 + +loop128: + VMOVDQU64 0x80(SI), Z1 + VMOVDQU64 0xc0(SI), Z5 + ADDQ $128, SI + + SUBQ $1, CX + VPCLMULQDQ $0x00, Z8, Z0, Z10 + VPCLMULQDQ $0x11, Z9, Z0, Z0 + VPTERNLOGD $0x96, Z1, Z10, Z0 // Combine results with xor into Z0 + + VPCLMULQDQ $0x00, Z8, Z4, Z10 + VPCLMULQDQ $0x11, Z9, Z4, Z4 + VPTERNLOGD $0x96, Z5, Z10, Z4 // Combine results with xor into Z4 + + CMPQ CX, $1 + JGT loop128 + +tail128: + // Extract X0 to X3 from ZMM0 + VEXTRACTF32X4 $1, Z0, X1 // X1: Second 128-bit lane + VEXTRACTF32X4 $2, Z0, X2 // X2: Third 128-bit lane + VEXTRACTF32X4 $3, Z0, X3 // X3: Fourth 128-bit lane + + // Extract X4 to X7 from ZMM4 + VEXTRACTF32X4 $1, Z4, X5 // X5: Second 128-bit lane + VEXTRACTF32X4 $2, Z4, X6 // X6: Third 128-bit lane + VEXTRACTF32X4 $3, Z4, X7 // X7: Fourth 128-bit lane + + MOVQ $0xd083dd594d96319d, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X0, X11 + MOVQ $0x946588403d4adcbc, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X0 + PXOR X11, X7 + PXOR X0, X7 + MOVQ $0x3c255f5ebc414423, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X1, X11 + MOVQ $0x34f5a24e22d66e90, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X1 + PXOR X11, X1 + PXOR X7, X1 + MOVQ $0x7b0ab10dd0f809fe, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X2, X11 + MOVQ $0x03363823e6e791e5, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X2 + PXOR X11, X2 + PXOR X1, X2 + MOVQ $0x0c32cdb31e18a84a, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X3, X11 + MOVQ $0x62242240ace5045a, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X3 + PXOR X11, X3 + PXOR X2, X3 + MOVQ $0xbdd7ac0ee1a4a0f0, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X4, X11 + MOVQ $0xa3ffdc1fe8e82a8b, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X4 + PXOR X11, X4 + PXOR X3, X4 + MOVQ $0xb0bc2e589204f500, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X5, X11 + MOVQ $0xe1e0bb9d45d7a44c, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X5 + PXOR X11, X5 + PXOR X4, X5 + MOVQ $0xeadc41fd2ba3d420, AX + MOVQ AX, X11 + PCLMULQDQ $0x00, X6, X11 + MOVQ $0x21e9761e252621ac, AX + PINSRQ $0x1, AX, X12 + PCLMULQDQ $0x11, X12, X6 + PXOR X11, X6 + PXOR X5, X6 + MOVQ AX, X5 + PCLMULQDQ $0x00, X6, X5 + PSHUFD $0xee, X6, X6 + PXOR X5, X6 + MOVQ $0x27ecfa329aef9f77, AX + MOVQ AX, X4 + PCLMULQDQ $0x00, X4, X6 + PEXTRQ $0, X6, BX + MOVQ $0x34d926535897936b, AX + MOVQ AX, X4 + PCLMULQDQ $0x00, X4, X6 + PXOR X5, X6 + PEXTRQ $1, X6, AX + XORQ BX, AX + +skip128: + NOTQ AX + MOVQ AX, checksum+32(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/minio/crc64nvme/crc64_arm64.go b/vendor/github.com/minio/crc64nvme/crc64_arm64.go new file mode 100644 index 000000000000..7e3ea9134592 --- /dev/null +++ b/vendor/github.com/minio/crc64nvme/crc64_arm64.go @@ -0,0 +1,17 @@ +// Copyright (c) 2025 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +//go:build !noasm && !appengine && !gccgo + +package crc64nvme + +import ( + "github.com/klauspost/cpuid/v2" +) + +var hasAsm = cpuid.CPU.Supports(cpuid.ASIMD, cpuid.PMULL, cpuid.SHA3) +var hasAsm512 = false + +func updateAsm(crc uint64, p []byte) (checksum uint64) +func updateAsm512(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") } diff --git a/vendor/github.com/minio/crc64nvme/crc64_arm64.s b/vendor/github.com/minio/crc64nvme/crc64_arm64.s new file mode 100644 index 000000000000..229a10fb7347 --- /dev/null +++ b/vendor/github.com/minio/crc64nvme/crc64_arm64.s @@ -0,0 +1,157 @@ +// Copyright (c) 2025 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +//go:build !noasm && !appengine && !gccgo + +#include "textflag.h" + +TEXT ·updateAsm(SB), $0-40 + MOVD crc+0(FP), R0 // checksum + MOVD p_base+8(FP), R1 // start pointer + MOVD p_len+16(FP), R2 // length of buffer + MOVD $·const(SB), R3 // constants + MVN R0, R0 + LSR $7, R2, R2 + CMP $1, R2 + BLT skip128 + + FLDPQ (R1), (F0, F1) + FLDPQ 32(R1), (F2, F3) + FLDPQ 64(R1), (F4, F5) + FLDPQ 96(R1), (F6, F7) + FMOVD R0, F8 + VMOVI $0, V9.B16 + VMOV V9.D[0], V8.D[1] + VEOR V8.B16, V0.B16, V0.B16 + CMP $1, R2 + BEQ tail128 + + MOVD 112(R3), R4 + MOVD 120(R3), R5 + FMOVD R4, F8 + VDUP R5, V9.D2 + +loop128: + ADD $128, R1, R1 + SUB $1, R2, R2 + VPMULL V0.D1, V8.D1, V10.Q1 + VPMULL2 V0.D2, V9.D2, V0.Q1 + FLDPQ (R1), (F11, F12) + VEOR3 V0.B16, V11.B16, V10.B16, V0.B16 + VPMULL V1.D1, V8.D1, V10.Q1 + VPMULL2 V1.D2, V9.D2, V1.Q1 + VEOR3 V1.B16, V12.B16, V10.B16, V1.B16 + VPMULL V2.D1, V8.D1, V10.Q1 + VPMULL2 V2.D2, V9.D2, V2.Q1 + FLDPQ 32(R1), (F11, F12) + VEOR3 V2.B16, V11.B16, V10.B16, V2.B16 + VPMULL V3.D1, V8.D1, V10.Q1 + VPMULL2 V3.D2, V9.D2, V3.Q1 + VEOR3 V3.B16, V12.B16, V10.B16, V3.B16 + VPMULL V4.D1, V8.D1, V10.Q1 + VPMULL2 V4.D2, V9.D2, V4.Q1 + FLDPQ 64(R1), (F11, F12) + VEOR3 V4.B16, V11.B16, V10.B16, V4.B16 + VPMULL V5.D1, V8.D1, V10.Q1 + VPMULL2 V5.D2, V9.D2, V5.Q1 + VEOR3 V5.B16, V12.B16, V10.B16, V5.B16 + VPMULL V6.D1, V8.D1, V10.Q1 + VPMULL2 V6.D2, V9.D2, V6.Q1 + FLDPQ 96(R1), (F11, F12) + VEOR3 V6.B16, V11.B16, V10.B16, V6.B16 + VPMULL V7.D1, V8.D1, V10.Q1 + VPMULL2 V7.D2, V9.D2, V7.Q1 + VEOR3 V7.B16, V12.B16, V10.B16, V7.B16 + CMP $1, R2 + BHI loop128 + +tail128: + MOVD (R3), R4 + FMOVD R4, F11 + VPMULL V0.D1, V11.D1, V11.Q1 + MOVD 8(R3), R4 + VDUP R4, V12.D2 + VPMULL2 V0.D2, V12.D2, V0.Q1 + VEOR3 V0.B16, V7.B16, V11.B16, V7.B16 + MOVD 16(R3), R4 + FMOVD R4, F11 + VPMULL V1.D1, V11.D1, V11.Q1 + MOVD 24(R3), R4 + VDUP R4, V12.D2 + VPMULL2 V1.D2, V12.D2, V1.Q1 + VEOR3 V1.B16, V11.B16, V7.B16, V1.B16 + MOVD 32(R3), R4 + FMOVD R4, F11 + VPMULL V2.D1, V11.D1, V11.Q1 + MOVD 40(R3), R4 + VDUP R4, V12.D2 + VPMULL2 V2.D2, V12.D2, V2.Q1 + VEOR3 V2.B16, V11.B16, V1.B16, V2.B16 + MOVD 48(R3), R4 + FMOVD R4, F11 + VPMULL V3.D1, V11.D1, V11.Q1 + MOVD 56(R3), R4 + VDUP R4, V12.D2 + VPMULL2 V3.D2, V12.D2, V3.Q1 + VEOR3 V3.B16, V11.B16, V2.B16, V3.B16 + MOVD 64(R3), R4 + FMOVD R4, F11 + VPMULL V4.D1, V11.D1, V11.Q1 + MOVD 72(R3), R4 + VDUP R4, V12.D2 + VPMULL2 V4.D2, V12.D2, V4.Q1 + VEOR3 V4.B16, V11.B16, V3.B16, V4.B16 + MOVD 80(R3), R4 + FMOVD R4, F11 + VPMULL V5.D1, V11.D1, V11.Q1 + MOVD 88(R3), R4 + VDUP R4, V12.D2 + VPMULL2 V5.D2, V12.D2, V5.Q1 + VEOR3 V5.B16, V11.B16, V4.B16, V5.B16 + MOVD 96(R3), R4 + FMOVD R4, F11 + VPMULL V6.D1, V11.D1, V11.Q1 + MOVD 104(R3), R4 + VDUP R4, V12.D2 + VPMULL2 V6.D2, V12.D2, V6.Q1 + VEOR3 V6.B16, V11.B16, V5.B16, V6.B16 + FMOVD R4, F5 + VPMULL V6.D1, V5.D1, V5.Q1 + VDUP V6.D[1], V6.D2 + VEOR V5.B8, V6.B8, V6.B8 + MOVD 128(R3), R4 + FMOVD R4, F4 + VPMULL V4.D1, V6.D1, V6.Q1 + FMOVD F6, R4 + MOVD 136(R3), R5 + FMOVD R5, F4 + VPMULL V4.D1, V6.D1, V6.Q1 + VEOR V6.B16, V5.B16, V6.B16 + VMOV V6.D[1], R5 + EOR R4, R5, R0 + +skip128: + MVN R0, R0 + MOVD R0, checksum+32(FP) + RET + +DATA ·const+0x000(SB)/8, $0xd083dd594d96319d // K_959 +DATA ·const+0x008(SB)/8, $0x946588403d4adcbc // K_895 +DATA ·const+0x010(SB)/8, $0x3c255f5ebc414423 // K_831 +DATA ·const+0x018(SB)/8, $0x34f5a24e22d66e90 // K_767 +DATA ·const+0x020(SB)/8, $0x7b0ab10dd0f809fe // K_703 +DATA ·const+0x028(SB)/8, $0x03363823e6e791e5 // K_639 +DATA ·const+0x030(SB)/8, $0x0c32cdb31e18a84a // K_575 +DATA ·const+0x038(SB)/8, $0x62242240ace5045a // K_511 +DATA ·const+0x040(SB)/8, $0xbdd7ac0ee1a4a0f0 // K_447 +DATA ·const+0x048(SB)/8, $0xa3ffdc1fe8e82a8b // K_383 +DATA ·const+0x050(SB)/8, $0xb0bc2e589204f500 // K_319 +DATA ·const+0x058(SB)/8, $0xe1e0bb9d45d7a44c // K_255 +DATA ·const+0x060(SB)/8, $0xeadc41fd2ba3d420 // K_191 +DATA ·const+0x068(SB)/8, $0x21e9761e252621ac // K_127 +DATA ·const+0x070(SB)/8, $0xa1ca681e733f9c40 // K_1087 +DATA ·const+0x078(SB)/8, $0x5f852fb61e8d92dc // K_1023 +DATA ·const+0x080(SB)/8, $0x27ecfa329aef9f77 // MU +DATA ·const+0x088(SB)/8, $0x34d926535897936b // POLY +GLOBL ·const(SB), (NOPTR+RODATA), $144 diff --git a/vendor/github.com/minio/crc64nvme/crc64_other.go b/vendor/github.com/minio/crc64nvme/crc64_other.go new file mode 100644 index 000000000000..ae260f7fbdb5 --- /dev/null +++ b/vendor/github.com/minio/crc64nvme/crc64_other.go @@ -0,0 +1,13 @@ +// Copyright (c) 2025 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +//go:build (!amd64 || noasm || appengine || gccgo) && (!arm64 || noasm || appengine || gccgo) + +package crc64nvme + +var hasAsm = false +var hasAsm512 = false + +func updateAsm(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") } +func updateAsm512(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt b/vendor/github.com/minio/md5-simd/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt rename to vendor/github.com/minio/md5-simd/LICENSE diff --git a/vendor/github.com/minio/md5-simd/LICENSE.Golang b/vendor/github.com/minio/md5-simd/LICENSE.Golang new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/vendor/github.com/minio/md5-simd/LICENSE.Golang @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/minio/md5-simd/README.md b/vendor/github.com/minio/md5-simd/README.md new file mode 100644 index 000000000000..fa6fce1a4737 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/README.md @@ -0,0 +1,198 @@ + +# md5-simd + +This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core. + +It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by amongst others supporting different message sizes per lane and adding AVX512. + +`md5-simd` integrates a similar mechanism as described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantages of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load. + +It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum. +Rather it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core, +thereby making more efficient usage of the computing resources. + +## Usage + +[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc) + + +In order to use `md5-simd`, you must first create an `Server` which can be +used to instantiate one or more objects for MD5 hashing. + +These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface +and as such the normal Write/Reset/Sum functionality works as expected. + +As an example: +``` + // Create server + server := md5simd.NewServer() + defer server.Close() + + // Create hashing object (conforming to hash.Hash) + md5Hash := server.NewHash() + defer md5Hash.Close() + + // Write one (or more) blocks + md5Hash.Write(block) + + // Return digest + digest := md5Hash.Sum([]byte{}) +``` + +To keep performance both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server) +and individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) should +be closed using the `Close()` function when no longer needed. + +A Hasher can efficiently be re-used by using [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) functionality. + +In case your system does not support the instructions required it will fall back to using `crypto/md5` for hashing. + +## Limitations + +As explained above `md5-simd` does not speed up an individual MD5 hash sum computation, +unless some hierarchical tree construct is used but this will result in different outcomes. +Running a single hash on a server results in approximately half the throughput. + +Instead, it allows running multiple MD5 calculations in parallel on a single CPU core. +This can be beneficial in e.g. multi-threaded server applications where many go-routines +are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core. + +This will result in a lower overall CPU usage as compared to using the standard `crypto/md5` +functionality where each MD5 hash computation will consume a single thread (core). + +It is best to test and measure the overall CPU usage in a representative usage scenario in your application +to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`, ideally under heavy CPU load. + +Also note that `md5-simd` is best meant to work with large objects, +so if your application only hashes small objects of a few kilobytes +you may be better of by using `crypto/md5`. + +## Performance + +For the best performance writes should be a multiple of 64 bytes, ideally a multiple of 32KB. +To help with that a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize) +can be inserted if you are unsure of the sizes of the writes. +Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash. + +A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2). +In situations where it is likely that more than 16 streams are fully loaded it may be beneficial +to use multiple servers. + +The following chart compares the multi-core performance between `crypto/md5` vs the AVX2 vs the AVX512 code: + +![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png) + +Compared to `crypto/md5`, the AVX2 version is up to 4x faster: + +``` +$ benchcmp crypto-md5.txt avx2.txt +benchmark old MB/s new MB/s speedup +BenchmarkParallel/32KB-4 2229.22 7370.50 3.31x +BenchmarkParallel/64KB-4 2233.61 8248.46 3.69x +BenchmarkParallel/128KB-4 2235.43 8660.74 3.87x +BenchmarkParallel/256KB-4 2236.39 8863.87 3.96x +BenchmarkParallel/512KB-4 2238.05 8985.39 4.01x +BenchmarkParallel/1MB-4 2233.56 9042.62 4.05x +BenchmarkParallel/2MB-4 2224.11 9014.46 4.05x +BenchmarkParallel/4MB-4 2199.78 8993.61 4.09x +BenchmarkParallel/8MB-4 2182.48 8748.22 4.01x +``` + +Compared to `crypto/md5`, the AVX512 is up to 8x faster (for larger block sizes): + +``` +$ benchcmp crypto-md5.txt avx512.txt +benchmark old MB/s new MB/s speedup +BenchmarkParallel/32KB-4 2229.22 11605.78 5.21x +BenchmarkParallel/64KB-4 2233.61 14329.65 6.42x +BenchmarkParallel/128KB-4 2235.43 16166.39 7.23x +BenchmarkParallel/256KB-4 2236.39 15570.09 6.96x +BenchmarkParallel/512KB-4 2238.05 16705.83 7.46x +BenchmarkParallel/1MB-4 2233.56 16941.95 7.59x +BenchmarkParallel/2MB-4 2224.11 17136.01 7.70x +BenchmarkParallel/4MB-4 2199.78 17218.61 7.83x +BenchmarkParallel/8MB-4 2182.48 17252.88 7.91x +``` + +These measurements were performed on AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz. + +If only one or two inputs are available the scalar calculation method will be used for the +optimal speed in these cases. + +## Operation + +To make operation as easy as possible there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows: + +![server-architecture](chart/server-architecture.png) + +The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing we found this to be the block size that yielded the best results. + +Whenever there is data available the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available all the lanes will be filled. However since that may not be the case, the server will fill less lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written. + +![server-lanes-example](chart/server-lanes-example.png) + +In this example 4 lanes are fully filled and 2 lanes are partially filled. In this case the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance. + +For AVX512 all 16 calculations will be done on a single core, on AVX2 on 2 cores if there is data for more than 8 lanes. +So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes. + + +## Design & Tech + +md5-simd has both an AVX2 (8-lane parallel), and an AVX512 (16-lane parallel version) algorithm to accelerate the computation with the following function definitions: +``` +//go:noescape +func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int) + +//go:noescape +func block16(state *uint32, ptrs *int64, mask uint64, n int) +``` + +The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged except for minor (cosmetic) changes. + +The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications. + +### Caching in upper ZMM registers + +The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1` which are subsequently used during `ROUND2` through to `ROUND4`. + +Since AVX512 has double the amount of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers for keeping the intermediate states on the CPU. As such, there is no need to pass in a corresponding `cache16` into the AVX512 block function. + +### Direct loading using 64-bit pointers + +The AVX2 uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out into memory, it is not possible to derive a "base" address and corresponding offsets (all within 32-bits) for all 8 slices. + +As such the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 inut slices and passed this buffer along with (fixed) 32-bit offsets into the assembly code. + +For the AVX512 version this interim buffer is not needed since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero). + +Note that two load (gather) instructions are needed because the AVX512 version processes 16-lanes in parallel, requiring 16 times 64-bit = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS). + +### Masking support + +Due to the fact that pointers are passed directly from the Golang slices, we need to protect against NULL pointers. +For this a 16-bit mask is passed in the AVX512 assembly code which is used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations. + +### Minor optimizations + +The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction. + +Also several logical operations from the various ROUNDS of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERMLOGD` instruction), resulting in a further simplification and speed-up. + +## Low level block function performance + +The benchmark below shows the (single thread) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). Also the baseline single-core performance from the standard `crypto/md5` package is shown for comparison. + +``` +BenchmarkCryptoMd5-4 687.66 MB/s 0 B/op 0 allocs/op +BenchmarkBlock8-4 4144.80 MB/s 0 B/op 0 allocs/op +BenchmarkBlock16-4 8228.88 MB/s 0 B/op 0 allocs/op +``` + +## License + +`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE. + +## Contributing + +Contributions are welcome, please send PRs for any enhancements. \ No newline at end of file diff --git a/vendor/github.com/minio/md5-simd/block16_amd64.s b/vendor/github.com/minio/md5-simd/block16_amd64.s new file mode 100644 index 000000000000..be0a43a3b192 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block16_amd64.s @@ -0,0 +1,228 @@ +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +//+build !noasm,!appengine,gc + +// This is the AVX512 implementation of the MD5 block function (16-way parallel) + +#define prep(index) \ + KMOVQ kmask, ktmp \ + VPGATHERDD index*4(base)(ptrs*1), ktmp, mem + +#define ROUND1(a, b, c, d, index, const, shift) \ + VPXORQ c, tmp, tmp \ + VPADDD 64*const(consts), a, a \ + VPADDD mem, a, a \ + VPTERNLOGD $0x6C, b, d, tmp \ + prep(index) \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND1noload(a, b, c, d, const, shift) \ + VPXORQ c, tmp, tmp \ + VPADDD 64*const(consts), a, a \ + VPADDD mem, a, a \ + VPTERNLOGD $0x6C, b, d, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND2(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VANDNPD c, tmp, tmp \ + VPTERNLOGD $0xEC, b, tmp, tmp2 \ + VMOVAPD c, tmp \ + VPADDD tmp2, a, a \ + VMOVAPD c, tmp2 \ + VPROLD $shift, a, a \ + VPADDD b, a, a + +#define ROUND3(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VPTERNLOGD $0x96, b, d, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD b, tmp \ + VPADDD b, a, a + +#define ROUND4(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VPTERNLOGD $0x36, b, c, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VPXORQ c, ones, tmp \ + VPADDD b, a, a + +TEXT ·block16(SB), 4, $0-40 + + MOVQ state+0(FP), BX + MOVQ base+8(FP), SI + MOVQ ptrs+16(FP), AX + KMOVQ mask+24(FP), K1 + MOVQ n+32(FP), DX + MOVQ ·avx512md5consts+0(SB), DI + +#define a Z0 +#define b Z1 +#define c Z2 +#define d Z3 + +#define sa Z4 +#define sb Z5 +#define sc Z6 +#define sd Z7 + +#define tmp Z8 +#define tmp2 Z9 +#define ptrs Z10 +#define ones Z12 +#define mem Z15 + +#define kmask K1 +#define ktmp K3 + +// ---------------------------------------------------------- +// Registers Z16 through to Z31 are used for caching purposes +// ---------------------------------------------------------- + +#define dig BX +#define count DX +#define base SI +#define consts DI + + // load digest into state registers + VMOVUPD (dig), a + VMOVUPD 0x40(dig), b + VMOVUPD 0x80(dig), c + VMOVUPD 0xc0(dig), d + + // load source pointers + VMOVUPD 0x00(AX), ptrs + + MOVQ $-1, AX + VPBROADCASTQ AX, ones + +loop: + VMOVAPD a, sa + VMOVAPD b, sb + VMOVAPD c, sc + VMOVAPD d, sd + + prep(0) + VMOVAPD d, tmp + VMOVAPD mem, Z16 + + ROUND1(a,b,c,d, 1,0x00, 7) + VMOVAPD mem, Z17 + ROUND1(d,a,b,c, 2,0x01,12) + VMOVAPD mem, Z18 + ROUND1(c,d,a,b, 3,0x02,17) + VMOVAPD mem, Z19 + ROUND1(b,c,d,a, 4,0x03,22) + VMOVAPD mem, Z20 + ROUND1(a,b,c,d, 5,0x04, 7) + VMOVAPD mem, Z21 + ROUND1(d,a,b,c, 6,0x05,12) + VMOVAPD mem, Z22 + ROUND1(c,d,a,b, 7,0x06,17) + VMOVAPD mem, Z23 + ROUND1(b,c,d,a, 8,0x07,22) + VMOVAPD mem, Z24 + ROUND1(a,b,c,d, 9,0x08, 7) + VMOVAPD mem, Z25 + ROUND1(d,a,b,c,10,0x09,12) + VMOVAPD mem, Z26 + ROUND1(c,d,a,b,11,0x0a,17) + VMOVAPD mem, Z27 + ROUND1(b,c,d,a,12,0x0b,22) + VMOVAPD mem, Z28 + ROUND1(a,b,c,d,13,0x0c, 7) + VMOVAPD mem, Z29 + ROUND1(d,a,b,c,14,0x0d,12) + VMOVAPD mem, Z30 + ROUND1(c,d,a,b,15,0x0e,17) + VMOVAPD mem, Z31 + + ROUND1noload(b,c,d,a, 0x0f,22) + + VMOVAPD d, tmp + VMOVAPD d, tmp2 + + ROUND2(a,b,c,d, Z17,0x10, 5) + ROUND2(d,a,b,c, Z22,0x11, 9) + ROUND2(c,d,a,b, Z27,0x12,14) + ROUND2(b,c,d,a, Z16,0x13,20) + ROUND2(a,b,c,d, Z21,0x14, 5) + ROUND2(d,a,b,c, Z26,0x15, 9) + ROUND2(c,d,a,b, Z31,0x16,14) + ROUND2(b,c,d,a, Z20,0x17,20) + ROUND2(a,b,c,d, Z25,0x18, 5) + ROUND2(d,a,b,c, Z30,0x19, 9) + ROUND2(c,d,a,b, Z19,0x1a,14) + ROUND2(b,c,d,a, Z24,0x1b,20) + ROUND2(a,b,c,d, Z29,0x1c, 5) + ROUND2(d,a,b,c, Z18,0x1d, 9) + ROUND2(c,d,a,b, Z23,0x1e,14) + ROUND2(b,c,d,a, Z28,0x1f,20) + + VMOVAPD c, tmp + + ROUND3(a,b,c,d, Z21,0x20, 4) + ROUND3(d,a,b,c, Z24,0x21,11) + ROUND3(c,d,a,b, Z27,0x22,16) + ROUND3(b,c,d,a, Z30,0x23,23) + ROUND3(a,b,c,d, Z17,0x24, 4) + ROUND3(d,a,b,c, Z20,0x25,11) + ROUND3(c,d,a,b, Z23,0x26,16) + ROUND3(b,c,d,a, Z26,0x27,23) + ROUND3(a,b,c,d, Z29,0x28, 4) + ROUND3(d,a,b,c, Z16,0x29,11) + ROUND3(c,d,a,b, Z19,0x2a,16) + ROUND3(b,c,d,a, Z22,0x2b,23) + ROUND3(a,b,c,d, Z25,0x2c, 4) + ROUND3(d,a,b,c, Z28,0x2d,11) + ROUND3(c,d,a,b, Z31,0x2e,16) + ROUND3(b,c,d,a, Z18,0x2f,23) + + VPXORQ d, ones, tmp + + ROUND4(a,b,c,d, Z16,0x30, 6) + ROUND4(d,a,b,c, Z23,0x31,10) + ROUND4(c,d,a,b, Z30,0x32,15) + ROUND4(b,c,d,a, Z21,0x33,21) + ROUND4(a,b,c,d, Z28,0x34, 6) + ROUND4(d,a,b,c, Z19,0x35,10) + ROUND4(c,d,a,b, Z26,0x36,15) + ROUND4(b,c,d,a, Z17,0x37,21) + ROUND4(a,b,c,d, Z24,0x38, 6) + ROUND4(d,a,b,c, Z31,0x39,10) + ROUND4(c,d,a,b, Z22,0x3a,15) + ROUND4(b,c,d,a, Z29,0x3b,21) + ROUND4(a,b,c,d, Z20,0x3c, 6) + ROUND4(d,a,b,c, Z27,0x3d,10) + ROUND4(c,d,a,b, Z18,0x3e,15) + ROUND4(b,c,d,a, Z25,0x3f,21) + + VPADDD sa, a, a + VPADDD sb, b, b + VPADDD sc, c, c + VPADDD sd, d, d + + LEAQ 64(base), base + SUBQ $64, count + JNE loop + + VMOVUPD a, (dig) + VMOVUPD b, 0x40(dig) + VMOVUPD c, 0x80(dig) + VMOVUPD d, 0xc0(dig) + + VZEROUPPER + RET diff --git a/vendor/github.com/minio/md5-simd/block8_amd64.s b/vendor/github.com/minio/md5-simd/block8_amd64.s new file mode 100644 index 000000000000..f57db17aa315 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block8_amd64.s @@ -0,0 +1,281 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2018 Igneous Systems +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +// This is the AVX2 implementation of the MD5 block function (8-way parallel) + +// block8(state *uint64, base uintptr, bufs *int32, cache *byte, n int) +TEXT ·block8(SB), 4, $0-40 + MOVQ state+0(FP), BX + MOVQ base+8(FP), SI + MOVQ bufs+16(FP), AX + MOVQ cache+24(FP), CX + MOVQ n+32(FP), DX + MOVQ ·avx256md5consts+0(SB), DI + + // Align cache (which is stack allocated by the compiler) + // to a 256 bit boundary (ymm register alignment) + // The cache8 type is deliberately oversized to permit this. + ADDQ $31, CX + ANDB $-32, CL + +#define a Y0 +#define b Y1 +#define c Y2 +#define d Y3 + +#define sa Y4 +#define sb Y5 +#define sc Y6 +#define sd Y7 + +#define tmp Y8 +#define tmp2 Y9 + +#define mask Y10 +#define off Y11 + +#define ones Y12 + +#define rtmp1 Y13 +#define rtmp2 Y14 + +#define mem Y15 + +#define dig BX +#define cache CX +#define count DX +#define base SI +#define consts DI + +#define prepmask \ + VPXOR mask, mask, mask \ + VPCMPGTD mask, off, mask + +#define prep(index) \ + VMOVAPD mask, rtmp2 \ + VPGATHERDD rtmp2, index*4(base)(off*1), mem + +#define load(index) \ + VMOVAPD index*32(cache), mem + +#define store(index) \ + VMOVAPD mem, index*32(cache) + +#define roll(shift, a) \ + VPSLLD $shift, a, rtmp1 \ + VPSRLD $32-shift, a, a \ + VPOR rtmp1, a, a + +#define ROUND1(a, b, c, d, index, const, shift) \ + VPXOR c, tmp, tmp \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp, tmp \ + VPXOR d, tmp, tmp \ + prep(index) \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND1load(a, b, c, d, index, const, shift) \ + VXORPD c, tmp, tmp \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp, tmp \ + VPXOR d, tmp, tmp \ + load(index) \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND2(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp2, tmp2 \ + VANDNPD c, tmp, tmp \ + load(index) \ + VPOR tmp, tmp2, tmp2 \ + VMOVAPD c, tmp \ + VPADDD tmp2, a, a \ + VMOVAPD c, tmp2 \ + roll(shift,a) \ + VPADDD b, a, a + +#define ROUND3(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + load(index) \ + VPXOR d, tmp, tmp \ + VPXOR b, tmp, tmp \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD b, tmp \ + VPADDD b, a, a + +#define ROUND4(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPOR b, tmp, tmp \ + VPXOR c, tmp, tmp \ + VPADDD tmp, a, a \ + load(index) \ + roll(shift,a) \ + VPXOR c, ones, tmp \ + VPADDD b, a, a + + // load digest into state registers + VMOVUPD (dig), a + VMOVUPD 32(dig), b + VMOVUPD 64(dig), c + VMOVUPD 96(dig), d + + // load source buffer offsets + VMOVUPD (AX), off + + prepmask + VPCMPEQD ones, ones, ones + +loop: + VMOVAPD a, sa + VMOVAPD b, sb + VMOVAPD c, sc + VMOVAPD d, sd + + prep(0) + VMOVAPD d, tmp + store(0) + + ROUND1(a,b,c,d, 1,0x00, 7) + store(1) + ROUND1(d,a,b,c, 2,0x01,12) + store(2) + ROUND1(c,d,a,b, 3,0x02,17) + store(3) + ROUND1(b,c,d,a, 4,0x03,22) + store(4) + ROUND1(a,b,c,d, 5,0x04, 7) + store(5) + ROUND1(d,a,b,c, 6,0x05,12) + store(6) + ROUND1(c,d,a,b, 7,0x06,17) + store(7) + ROUND1(b,c,d,a, 8,0x07,22) + store(8) + ROUND1(a,b,c,d, 9,0x08, 7) + store(9) + ROUND1(d,a,b,c,10,0x09,12) + store(10) + ROUND1(c,d,a,b,11,0x0a,17) + store(11) + ROUND1(b,c,d,a,12,0x0b,22) + store(12) + ROUND1(a,b,c,d,13,0x0c, 7) + store(13) + ROUND1(d,a,b,c,14,0x0d,12) + store(14) + ROUND1(c,d,a,b,15,0x0e,17) + store(15) + ROUND1load(b,c,d,a, 1,0x0f,22) + + VMOVAPD d, tmp + VMOVAPD d, tmp2 + + ROUND2(a,b,c,d, 6,0x10, 5) + ROUND2(d,a,b,c,11,0x11, 9) + ROUND2(c,d,a,b, 0,0x12,14) + ROUND2(b,c,d,a, 5,0x13,20) + ROUND2(a,b,c,d,10,0x14, 5) + ROUND2(d,a,b,c,15,0x15, 9) + ROUND2(c,d,a,b, 4,0x16,14) + ROUND2(b,c,d,a, 9,0x17,20) + ROUND2(a,b,c,d,14,0x18, 5) + ROUND2(d,a,b,c, 3,0x19, 9) + ROUND2(c,d,a,b, 8,0x1a,14) + ROUND2(b,c,d,a,13,0x1b,20) + ROUND2(a,b,c,d, 2,0x1c, 5) + ROUND2(d,a,b,c, 7,0x1d, 9) + ROUND2(c,d,a,b,12,0x1e,14) + ROUND2(b,c,d,a, 0,0x1f,20) + + load(5) + VMOVAPD c, tmp + + ROUND3(a,b,c,d, 8,0x20, 4) + ROUND3(d,a,b,c,11,0x21,11) + ROUND3(c,d,a,b,14,0x22,16) + ROUND3(b,c,d,a, 1,0x23,23) + ROUND3(a,b,c,d, 4,0x24, 4) + ROUND3(d,a,b,c, 7,0x25,11) + ROUND3(c,d,a,b,10,0x26,16) + ROUND3(b,c,d,a,13,0x27,23) + ROUND3(a,b,c,d, 0,0x28, 4) + ROUND3(d,a,b,c, 3,0x29,11) + ROUND3(c,d,a,b, 6,0x2a,16) + ROUND3(b,c,d,a, 9,0x2b,23) + ROUND3(a,b,c,d,12,0x2c, 4) + ROUND3(d,a,b,c,15,0x2d,11) + ROUND3(c,d,a,b, 2,0x2e,16) + ROUND3(b,c,d,a, 0,0x2f,23) + + load(0) + VPXOR d, ones, tmp + + ROUND4(a,b,c,d, 7,0x30, 6) + ROUND4(d,a,b,c,14,0x31,10) + ROUND4(c,d,a,b, 5,0x32,15) + ROUND4(b,c,d,a,12,0x33,21) + ROUND4(a,b,c,d, 3,0x34, 6) + ROUND4(d,a,b,c,10,0x35,10) + ROUND4(c,d,a,b, 1,0x36,15) + ROUND4(b,c,d,a, 8,0x37,21) + ROUND4(a,b,c,d,15,0x38, 6) + ROUND4(d,a,b,c, 6,0x39,10) + ROUND4(c,d,a,b,13,0x3a,15) + ROUND4(b,c,d,a, 4,0x3b,21) + ROUND4(a,b,c,d,11,0x3c, 6) + ROUND4(d,a,b,c, 2,0x3d,10) + ROUND4(c,d,a,b, 9,0x3e,15) + ROUND4(b,c,d,a, 0,0x3f,21) + + VPADDD sa, a, a + VPADDD sb, b, b + VPADDD sc, c, c + VPADDD sd, d, d + + LEAQ 64(base), base + SUBQ $64, count + JNE loop + + VMOVUPD a, (dig) + VMOVUPD b, 32(dig) + VMOVUPD c, 64(dig) + VMOVUPD d, 96(dig) + + VZEROUPPER + RET diff --git a/vendor/github.com/minio/md5-simd/block_amd64.go b/vendor/github.com/minio/md5-simd/block_amd64.go new file mode 100644 index 000000000000..16edda2689c6 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block_amd64.go @@ -0,0 +1,210 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +import ( + "fmt" + "math" + "unsafe" + + "github.com/klauspost/cpuid/v2" +) + +var hasAVX512 bool + +func init() { + // VANDNPD requires AVX512DQ. Technically it could be VPTERNLOGQ which is AVX512F. + hasAVX512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ) +} + +//go:noescape +func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int) + +//go:noescape +func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int) + +// 8-way 4x uint32 digests in 4 ymm registers +// (ymm0, ymm1, ymm2, ymm3) +type digest8 struct { + v0, v1, v2, v3 [8]uint32 +} + +// Stack cache for 8x64 byte md5.BlockSize bytes. +// Must be 32-byte aligned, so allocate 512+32 and +// align upwards at runtime. +type cache8 [512 + 32]byte + +// MD5 magic numbers for one lane of hashing; inflated +// 8x below at init time. +var md5consts = [64]uint32{ + 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, + 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, + 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, + 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, + 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, + 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, + 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, + 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, + 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, + 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, + 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, + 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, + 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, + 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1, + 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, + 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391, +} + +// inflate the consts 8-way for 8x md5 (256 bit ymm registers) +var avx256md5consts = func(c []uint32) []uint32 { + inf := make([]uint32, 8*len(c)) + for i := range c { + for j := 0; j < 8; j++ { + inf[(i*8)+j] = c[i] + } + } + return inf +}(md5consts[:]) + +// 16-way 4x uint32 digests in 4 zmm registers +type digest16 struct { + v0, v1, v2, v3 [16]uint32 +} + +// inflate the consts 16-way for 16x md5 (512 bit zmm registers) +var avx512md5consts = func(c []uint32) []uint32 { + inf := make([]uint32, 16*len(c)) + for i := range c { + for j := 0; j < 16; j++ { + inf[(i*16)+j] = c[i] + } + } + return inf +}(md5consts[:]) + +// Interface function to assembly code +func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) { + if hasAVX512 { + blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16) + return + } + + // Preparing data using copy is slower since copies aren't inlined. + + // Calculate on this goroutine + if half { + for i := range s.i8[0][:] { + s.i8[0][i] = input[i] + } + for i := range s.d8a.v0[:] { + s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i] + } + blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a) + for i := range s.d8a.v0[:] { + d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] + } + return + } + + for i := range s.i8[0][:] { + s.i8[0][i], s.i8[1][i] = input[i], input[8+i] + } + + for i := range s.d8a.v0[:] { + j := (i + 8) & 15 + s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i] + s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j] + } + + // Benchmarks appears to be slightly faster when spinning up 2 goroutines instead + // of using the current for one of the blocks. + s.wg.Add(2) + go func() { blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a); s.wg.Done() }() + go func() { blockMd5_avx2(&s.d8b, s.i8[1], s.allBufs, &s.maskRounds8b); s.wg.Done() }() + s.wg.Wait() + for i := range s.d8a.v0[:] { + d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] + } + for i := range s.d8b.v0[:] { + j := (i + 8) & 15 + d.v0[j], d.v1[j], d.v2[j], d.v3[j] = s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] + } +} + +// Interface function to AVX512 assembly code +func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) { + baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) + ptrs := [16]int32{} + + for i := range ptrs { + if len(input[i]) > 0 { + if len(input[i]) > internalBlockSize { + panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) + } + + off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin + if off > math.MaxUint32 { + panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) + } + ptrs[i] = int32(off) + } + } + + sdup := *s // create copy of initial states to receive intermediate updates + + rounds := generateMaskAndRounds16(input, maskRounds) + + for r := 0; r < rounds; r++ { + m := maskRounds[r] + + block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds)) + + for j := 0; j < len(ptrs); j++ { + ptrs[j] += int32(64 * m.rounds) // update pointers for next round + if m.mask&(1< 0 { + if len(input[i]) > internalBlockSize { + panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) + } + + off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin + if off > math.MaxUint32 { + panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) + } + ptrs[i] = int32(off) + } + } + + sdup := *s // create copy of initial states to receive intermediate updates + + rounds := generateMaskAndRounds8(input, maskRounds) + + for r := 0; r < rounds; r++ { + m := maskRounds[r] + var cache cache8 // stack storage for block8 tmp state + block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds)) + + for j := 0; j < len(ptrs); j++ { + ptrs[j] += int32(64 * m.rounds) // update pointers for next round + if m.mask&(1< internalBlockSize { + l = internalBlockSize + } + nnn, err := d.write(p[:l]) + if err != nil { + return nn, err + } + nn += nnn + p = p[l:] + + if len(p) == 0 { + break + } + + } + return +} + +func (d *md5Digest) write(p []byte) (nn int, err error) { + + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == BlockSize { + // Create a copy of the overflow buffer in order to send it async over the channel + // (since we will modify the overflow buffer down below with any access beyond multiples of 64) + tmp := <-d.buffers + tmp = tmp[:BlockSize] + copy(tmp, d.x[:]) + d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= BlockSize { + n := len(p) &^ (BlockSize - 1) + buf := <-d.buffers + buf = buf[:n] + copy(buf, p) + d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d *md5Digest) Close() { + if d.blocksCh != nil { + close(d.blocksCh) + d.blocksCh = nil + } +} + +var sumChPool sync.Pool + +func init() { + sumChPool.New = func() interface{} { + return make(chan sumResult, 1) + } +} + +// Sum - Return MD5 sum in bytes +func (d *md5Digest) Sum(in []byte) (result []byte) { + if d.blocksCh == nil { + panic("sum after close") + } + + trail := <-d.buffers + trail = append(trail[:0], d.x[:d.nx]...) + + length := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if length%64 < 56 { + trail = append(trail, tmp[0:56-length%64]...) + } else { + trail = append(trail, tmp[0:64+56-length%64]...) + } + + // Length in bits. + length <<= 3 + binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits + + trail = append(trail, tmp[0:8]...) + if len(trail)%BlockSize != 0 { + panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx)) + } + sumCh := sumChPool.Get().(chan sumResult) + d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true) + + sum := <-sumCh + sumChPool.Put(sumCh) + + return append(in, sum.digest[:]...) +} + +// sendBlock will send a block for processing. +// If cycle is true we will block on cycle, otherwise we will only block +// if the block channel is full. +func (d *md5Digest) sendBlock(bi blockInput, cycle bool) { + if cycle { + select { + case d.blocksCh <- bi: + d.cycleServer <- d.uid + } + return + } + // Only block on cycle if we filled the buffer + select { + case d.blocksCh <- bi: + return + default: + d.cycleServer <- d.uid + d.blocksCh <- bi + } +} diff --git a/vendor/github.com/minio/md5-simd/md5-server_amd64.go b/vendor/github.com/minio/md5-simd/md5-server_amd64.go new file mode 100644 index 000000000000..94f741c54535 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-server_amd64.go @@ -0,0 +1,397 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +import ( + "encoding/binary" + "fmt" + "runtime" + "sync" + + "github.com/klauspost/cpuid/v2" +) + +// MD5 initialization constants +const ( + // Lanes is the number of concurrently calculated hashes. + Lanes = 16 + + init0 = 0x67452301 + init1 = 0xefcdab89 + init2 = 0x98badcfe + init3 = 0x10325476 + + // Use scalar routine when below this many lanes + useScalarBelow = 3 +) + +// md5ServerUID - Does not start at 0 but next multiple of 16 so as to be able to +// differentiate with default initialisation value of 0 +const md5ServerUID = Lanes + +const buffersPerLane = 3 + +// Message to send across input channel +type blockInput struct { + uid uint64 + msg []byte + sumCh chan sumResult + reset bool +} + +type sumResult struct { + digest [Size]byte +} + +type lanesInfo [Lanes]blockInput + +// md5Server - Type to implement parallel handling of MD5 invocations +type md5Server struct { + uidCounter uint64 + cycle chan uint64 // client with uid has update. + newInput chan newClient // Add new client. + digests map[uint64][Size]byte // Map of uids to (interim) digest results + maskRounds16 [16]maskRounds // Pre-allocated static array for max 16 rounds + maskRounds8a [8]maskRounds // Pre-allocated static array for max 8 rounds (1st AVX2 core) + maskRounds8b [8]maskRounds // Pre-allocated static array for max 8 rounds (2nd AVX2 core) + allBufs []byte // Preallocated buffer. + buffers chan []byte // Preallocated buffers, sliced from allBufs. + + i8 [2][8][]byte // avx2 temporary vars + d8a, d8b digest8 + wg sync.WaitGroup +} + +// NewServer - Create new object for parallel processing handling +func NewServer() Server { + if !cpuid.CPU.Supports(cpuid.AVX2) { + return &fallbackServer{} + } + md5srv := &md5Server{} + md5srv.digests = make(map[uint64][Size]byte) + md5srv.newInput = make(chan newClient, Lanes) + md5srv.cycle = make(chan uint64, Lanes*10) + md5srv.uidCounter = md5ServerUID - 1 + md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize) + md5srv.buffers = make(chan []byte, buffersPerLane*Lanes) + // Fill buffers. + for i := 0; i < buffersPerLane*Lanes; i++ { + s := 32 + i*internalBlockSize + md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize] + } + + // Start a single thread for reading from the input channel + go md5srv.process(md5srv.newInput) + return md5srv +} + +type newClient struct { + uid uint64 + input chan blockInput +} + +// process - Sole handler for reading from the input channel. +func (s *md5Server) process(newClients chan newClient) { + // To fill up as many lanes as possible: + // + // 1. Wait for a cycle id. + // 2. If not already in a lane, add, otherwise leave on channel + // 3. Start timer + // 4. Check if lanes is full, if so, goto 10 (process). + // 5. If timeout, goto 10. + // 6. Wait for new id (goto 2) or timeout (goto 10). + // 10. Process. + // 11. Check all input if there is already input, if so add to lanes. + // 12. Goto 1 + + // lanes contains the lanes. + var lanes lanesInfo + // lanesFilled contains the number of filled lanes for current cycle. + var lanesFilled int + // clients contains active clients + var clients = make(map[uint64]chan blockInput, Lanes) + + addToLane := func(uid uint64) { + cl, ok := clients[uid] + if !ok { + // Unknown client. Maybe it was already removed. + return + } + // Check if we already have it. + for _, lane := range lanes[:lanesFilled] { + if lane.uid == uid { + return + } + } + // Continue until we get a block or there is nothing on channel + for { + select { + case block, ok := <-cl: + if !ok { + // Client disconnected + delete(clients, block.uid) + return + } + if block.uid != uid { + panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid)) + } + // If reset message, reset and we're done + if block.reset { + delete(s.digests, uid) + continue + } + + // If requesting sum, we will need to maintain state. + if block.sumCh != nil { + var dig digest + d, ok := s.digests[uid] + if ok { + dig.s[0] = binary.LittleEndian.Uint32(d[0:4]) + dig.s[1] = binary.LittleEndian.Uint32(d[4:8]) + dig.s[2] = binary.LittleEndian.Uint32(d[8:12]) + dig.s[3] = binary.LittleEndian.Uint32(d[12:16]) + } else { + dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3 + } + + sum := sumResult{} + // Add end block to current digest. + blockScalar(&dig.s, block.msg) + + binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0]) + binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1]) + binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2]) + binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3]) + block.sumCh <- sum + if block.msg != nil { + s.buffers <- block.msg + } + continue + } + if len(block.msg) == 0 { + continue + } + lanes[lanesFilled] = block + lanesFilled++ + return + default: + return + } + } + } + addNewClient := func(cl newClient) { + if _, ok := clients[cl.uid]; ok { + panic("internal error: duplicate client registration") + } + clients[cl.uid] = cl.input + } + + allLanesFilled := func() bool { + return lanesFilled == Lanes || lanesFilled >= len(clients) + } + + for { + // Step 1. + for lanesFilled == 0 { + select { + case cl, ok := <-newClients: + if !ok { + return + } + addNewClient(cl) + // Check if it already sent a payload. + addToLane(cl.uid) + continue + case uid := <-s.cycle: + addToLane(uid) + } + } + + fillLanes: + for !allLanesFilled() { + select { + case cl, ok := <-newClients: + if !ok { + return + } + addNewClient(cl) + + case uid := <-s.cycle: + addToLane(uid) + default: + // Nothing more queued... + break fillLanes + } + } + + // If we did not fill all lanes, check if there is more waiting + if !allLanesFilled() { + runtime.Gosched() + for uid := range clients { + addToLane(uid) + if allLanesFilled() { + break + } + } + } + if false { + if !allLanesFilled() { + fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients)) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + } else if true { + fmt.Println("all lanes filled") + } + } + // Process the lanes we could collect + s.blocks(lanes[:lanesFilled]) + + // Clear lanes... + lanesFilled = 0 + // Add all current queued + for uid := range clients { + addToLane(uid) + if allLanesFilled() { + break + } + } + } +} + +func (s *md5Server) Close() { + if s.newInput != nil { + close(s.newInput) + s.newInput = nil + } +} + +// Invoke assembly and send results back +func (s *md5Server) blocks(lanes []blockInput) { + if len(lanes) < useScalarBelow { + // Use scalar routine when below this many lanes + switch len(lanes) { + case 0: + case 1: + lane := lanes[0] + var d digest + a, ok := s.digests[lane.uid] + if ok { + d.s[0] = binary.LittleEndian.Uint32(a[0:4]) + d.s[1] = binary.LittleEndian.Uint32(a[4:8]) + d.s[2] = binary.LittleEndian.Uint32(a[8:12]) + d.s[3] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.s[0] = init0 + d.s[1] = init1 + d.s[2] = init2 + d.s[3] = init3 + } + if len(lane.msg) > 0 { + // Update... + blockScalar(&d.s, lane.msg) + } + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], d.s[0]) + binary.LittleEndian.PutUint32(dig[4:], d.s[1]) + binary.LittleEndian.PutUint32(dig[8:], d.s[2]) + binary.LittleEndian.PutUint32(dig[12:], d.s[3]) + s.digests[lane.uid] = dig + + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[0] = blockInput{} + + default: + s.wg.Add(len(lanes)) + var results [useScalarBelow]digest + for i := range lanes { + lane := lanes[i] + go func(i int) { + var d digest + defer s.wg.Done() + a, ok := s.digests[lane.uid] + if ok { + d.s[0] = binary.LittleEndian.Uint32(a[0:4]) + d.s[1] = binary.LittleEndian.Uint32(a[4:8]) + d.s[2] = binary.LittleEndian.Uint32(a[8:12]) + d.s[3] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.s[0] = init0 + d.s[1] = init1 + d.s[2] = init2 + d.s[3] = init3 + } + if len(lane.msg) == 0 { + results[i] = d + return + } + // Update... + blockScalar(&d.s, lane.msg) + results[i] = d + }(i) + } + s.wg.Wait() + for i, lane := range lanes { + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], results[i].s[0]) + binary.LittleEndian.PutUint32(dig[4:], results[i].s[1]) + binary.LittleEndian.PutUint32(dig[8:], results[i].s[2]) + binary.LittleEndian.PutUint32(dig[12:], results[i].s[3]) + s.digests[lane.uid] = dig + + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[i] = blockInput{} + } + } + return + } + + inputs := [16][]byte{} + for i := range lanes { + inputs[i] = lanes[i].msg + } + + // Collect active digests... + state := s.getDigests(lanes) + // Process all lanes... + s.blockMd5_x16(&state, inputs, len(lanes) <= 8) + + for i, lane := range lanes { + uid := lane.uid + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], state.v0[i]) + binary.LittleEndian.PutUint32(dig[4:], state.v1[i]) + binary.LittleEndian.PutUint32(dig[8:], state.v2[i]) + binary.LittleEndian.PutUint32(dig[12:], state.v3[i]) + + s.digests[uid] = dig + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[i] = blockInput{} + } +} + +func (s *md5Server) getDigests(lanes []blockInput) (d digest16) { + for i, lane := range lanes { + a, ok := s.digests[lane.uid] + if ok { + d.v0[i] = binary.LittleEndian.Uint32(a[0:4]) + d.v1[i] = binary.LittleEndian.Uint32(a[4:8]) + d.v2[i] = binary.LittleEndian.Uint32(a[8:12]) + d.v3[i] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.v0[i] = init0 + d.v1[i] = init1 + d.v2[i] = init2 + d.v3[i] = init3 + } + } + return +} diff --git a/vendor/github.com/minio/md5-simd/md5-server_fallback.go b/vendor/github.com/minio/md5-simd/md5-server_fallback.go new file mode 100644 index 000000000000..7814dada3cbe --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-server_fallback.go @@ -0,0 +1,12 @@ +//+build !amd64 appengine !gc noasm + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +// NewServer - Create new object for parallel processing handling +func NewServer() *fallbackServer { + return &fallbackServer{} +} diff --git a/vendor/github.com/minio/md5-simd/md5-util_amd64.go b/vendor/github.com/minio/md5-simd/md5-util_amd64.go new file mode 100644 index 000000000000..73981b0eb966 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-util_amd64.go @@ -0,0 +1,85 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +// Helper struct for sorting blocks based on length +type lane struct { + len uint + pos uint +} + +type digest struct { + s [4]uint32 +} + +// Helper struct for generating number of rounds in combination with mask for valid lanes +type maskRounds struct { + mask uint64 + rounds uint64 +} + +func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) { + // Sort on blocks length small to large + var sorted [8]lane + for c, inpt := range input[:] { + sorted[c] = lane{uint(len(inpt)), uint(c)} + for i := c - 1; i >= 0; i-- { + // swap so largest is at the end... + if sorted[i].len > sorted[i+1].len { + sorted[i], sorted[i+1] = sorted[i+1], sorted[i] + continue + } + break + } + } + + // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks + m, round := uint64(0xff), uint64(0) + + for _, s := range sorted[:] { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} + rounds++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + return +} + +func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) { + // Sort on blocks length small to large + var sorted [16]lane + for c, inpt := range input[:] { + sorted[c] = lane{uint(len(inpt)), uint(c)} + for i := c - 1; i >= 0; i-- { + // swap so largest is at the end... + if sorted[i].len > sorted[i+1].len { + sorted[i], sorted[i+1] = sorted[i+1], sorted[i] + continue + } + break + } + } + + // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks + m, round := uint64(0xffff), uint64(0) + + for _, s := range sorted[:] { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} + rounds++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + return +} diff --git a/vendor/github.com/minio/md5-simd/md5.go b/vendor/github.com/minio/md5-simd/md5.go new file mode 100644 index 000000000000..11b0cb962b90 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5.go @@ -0,0 +1,63 @@ +package md5simd + +import ( + "crypto/md5" + "hash" + "sync" +) + +const ( + // The blocksize of MD5 in bytes. + BlockSize = 64 + + // The size of an MD5 checksum in bytes. + Size = 16 + + // internalBlockSize is the internal block size. + internalBlockSize = 32 << 10 +) + +type Server interface { + NewHash() Hasher + Close() +} + +type Hasher interface { + hash.Hash + Close() +} + +// StdlibHasher returns a Hasher that uses the stdlib for hashing. +// Used hashers are stored in a pool for fast reuse. +func StdlibHasher() Hasher { + return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)} +} + +// md5Wrapper is a wrapper around the builtin hasher. +type md5Wrapper struct { + hash.Hash +} + +var md5Pool = sync.Pool{New: func() interface{} { + return md5.New() +}} + +// fallbackServer - Fallback when no assembly is available. +type fallbackServer struct { +} + +// NewHash -- return regular Golang md5 hashing from crypto +func (s *fallbackServer) NewHash() Hasher { + return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)} +} + +func (s *fallbackServer) Close() { +} + +func (m *md5Wrapper) Close() { + if m.Hash != nil { + m.Reset() + md5Pool.Put(m.Hash) + m.Hash = nil + } +} diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.go b/vendor/github.com/minio/md5-simd/md5block_amd64.go new file mode 100644 index 000000000000..4c2793662d5b --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5block_amd64.go @@ -0,0 +1,11 @@ +// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT. + +// +build !appengine +// +build !noasm +// +build gc + +package md5simd + +// Encode p to digest +//go:noescape +func blockScalar(dig *[4]uint32, p []byte) diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.s b/vendor/github.com/minio/md5-simd/md5block_amd64.s new file mode 100644 index 000000000000..fbc4a21f2b68 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5block_amd64.s @@ -0,0 +1,714 @@ +// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT. + +// +build !appengine +// +build !noasm +// +build gc + +// func blockScalar(dig *[4]uint32, p []byte) +TEXT ·blockScalar(SB), $0-32 + MOVQ p_len+16(FP), AX + MOVQ dig+0(FP), CX + MOVQ p_base+8(FP), DX + SHRQ $0x06, AX + SHLQ $0x06, AX + LEAQ (DX)(AX*1), AX + CMPQ DX, AX + JEQ end + MOVL (CX), BX + MOVL 4(CX), BP + MOVL 8(CX), SI + MOVL 12(CX), CX + MOVL $0xffffffff, DI + +loop: + MOVL (DX), R8 + MOVL CX, R9 + MOVL BX, R10 + MOVL BP, R11 + MOVL SI, R12 + MOVL CX, R13 + + // ROUND1 + XORL SI, R9 + ADDL $0xd76aa478, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 4(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0xe8c7b756, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 8(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0x242070db, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 12(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0xc1bdceee, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 16(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0xf57c0faf, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 20(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0x4787c62a, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 24(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xa8304613, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 28(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0xfd469501, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 32(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0x698098d8, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 36(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0x8b44f7af, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 40(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xffff5bb1, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 44(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0x895cd7be, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 48(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0x6b901122, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 52(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0xfd987193, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 56(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xa679438e, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 60(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0x49b40821, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 4(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + + // ROUND2 + MOVL CX, R9 + MOVL CX, R14 + XORL DI, R9 + ADDL $0xf61e2562, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 24(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xc040b340, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 44(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0x265e5a51, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL (DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0xe9b6c7aa, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 20(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0xd62f105d, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 40(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0x02441453, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 60(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0xd8a1e681, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 16(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0xe7d3fbc8, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 36(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0x21e1cde6, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 56(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xc33707d6, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 12(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0xf4d50d87, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 32(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0x455a14ed, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 52(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0xa9e3e905, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 8(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xfcefa3f8, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 28(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0x676f02d9, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 48(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0x8d2a4c8a, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 20(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + + // ROUND3 + MOVL SI, R9 + ADDL $0xfffa3942, BX + ADDL R8, BX + MOVL 32(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0x8771f681, CX + ADDL R8, CX + MOVL 44(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0x6d9d6122, SI + ADDL R8, SI + MOVL 56(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xfde5380c, BP + ADDL R8, BP + MOVL 4(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0xa4beea44, BX + ADDL R8, BX + MOVL 16(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0x4bdecfa9, CX + ADDL R8, CX + MOVL 28(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0xf6bb4b60, SI + ADDL R8, SI + MOVL 40(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xbebfbc70, BP + ADDL R8, BP + MOVL 52(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0x289b7ec6, BX + ADDL R8, BX + MOVL (DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0xeaa127fa, CX + ADDL R8, CX + MOVL 12(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0xd4ef3085, SI + ADDL R8, SI + MOVL 24(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0x04881d05, BP + ADDL R8, BP + MOVL 36(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0xd9d4d039, BX + ADDL R8, BX + MOVL 48(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0xe6db99e5, CX + ADDL R8, CX + MOVL 60(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0x1fa27cf8, SI + ADDL R8, SI + MOVL 8(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xc4ac5665, BP + ADDL R8, BP + MOVL (DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + + // ROUND4 + MOVL DI, R9 + XORL CX, R9 + ADDL $0xf4292244, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 28(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0x432aff97, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 56(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xab9423a7, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 20(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0xfc93a039, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + MOVL 48(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0x655b59c3, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 12(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0x8f0ccc92, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 40(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xffeff47d, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 4(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0x85845dd1, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + MOVL 32(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0x6fa87e4f, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 60(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0xfe2ce6e0, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 24(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xa3014314, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 52(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0x4e0811a1, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + MOVL 16(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0xf7537e82, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 44(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0xbd3af235, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 8(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0x2ad7d2bb, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 36(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0xeb86d391, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + ROLL $0x15, BP + ADDL SI, BP + ADDL R10, BX + ADDL R11, BP + ADDL R12, SI + ADDL R13, CX + + // Prepare next loop + ADDQ $0x40, DX + CMPQ DX, AX + JB loop + + // Write output + MOVQ dig+0(FP), AX + MOVL BX, (AX) + MOVL BP, 4(AX) + MOVL SI, 8(AX) + MOVL CX, 12(AX) + +end: + RET diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore new file mode 100644 index 000000000000..8ae0384ebce8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -0,0 +1,6 @@ +*~ +*.test +validator +golangci-lint +functional_tests +.idea \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml new file mode 100644 index 000000000000..88442e0cfef4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -0,0 +1,72 @@ +version: "2" +linters: + disable-all: true + enable: + - durationcheck + - gocritic + - gomodguard + - govet + - ineffassign + - misspell + - revive + - staticcheck + - unconvert + - unused + - usetesting + - whitespace + settings: + misspell: + locale: US + staticcheck: + checks: + - all + - -SA1008 + - -SA1019 + - -SA4000 + - -SA9004 + - -ST1000 + - -ST1005 + - -ST1016 + - -ST1021 + - -ST1020 + - -U1000 + exclusions: + generated: lax + rules: + - path: (.+)\.go$ + text: "empty-block:" + - path: (.+)\.go$ + text: "unused-parameter:" + - path: (.+)\.go$ + text: "dot-imports:" + - path: (.+)\.go$ + text: "singleCaseSwitch: should rewrite switch statement to if statement" + - path: (.+)\.go$ + text: "unlambda: replace" + - path: (.+)\.go$ + text: "captLocal:" + - path: (.+)\.go$ + text: "should have a package comment" + - path: (.+)\.go$ + text: "ifElseChain:" + - path: (.+)\.go$ + text: "elseif:" + - path: (.+)\.go$ + text: "Error return value of" + - path: (.+)\.go$ + text: "unnecessary conversion" + - path: (.+)\.go$ + text: "Error return value is not checked" +issues: + max-issues-per-linter: 100 + max-same-issues: 100 +formatters: + enable: + - gofumpt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/minio/minio-go/v7/CLAUDE.md b/vendor/github.com/minio/minio-go/v7/CLAUDE.md new file mode 100644 index 000000000000..26ff953237b8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CLAUDE.md @@ -0,0 +1,125 @@ +CLAUDE.md +========= + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +Commands +-------- + +### Testing + +```bash +# Run all tests with race detection (requires MinIO server at localhost:9000) +SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... + +# Run tests without race detection +go test ./... + +# Run short tests only (no functional tests) +go test -short -race ./... + +# Run functional tests +go build -race functional_tests.go +SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests + +# Run functional tests without TLS +SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=0 MINT_MODE=full ./functional_tests +``` + +### Linting and Code Quality + +```bash +# Run all checks (lint, vet, test, examples, functional tests) +make checks + +# Run linter only +make lint + +# Run vet and staticcheck +make vet + +# Alternative: run golangci-lint directly +golangci-lint run --timeout=5m --config ./.golangci.yml +``` + +### Building Examples + +```bash +# Build all examples +make examples + +# Build a specific example +cd examples/s3 && go build -mod=mod putobject.go +``` + +Architecture +------------ + +### Core Client Structure + +The MinIO Go SDK is organized around a central `Client` struct (api.go:52) that implements Amazon S3 compatible methods. Key architectural patterns: + +1. **Modular API Organization**: API methods are split into logical files: + + - `api-bucket-*.go`: Bucket operations (lifecycle, encryption, versioning, etc.) + - `api-object-*.go`: Object operations (legal hold, retention, tagging, etc.) + - `api-get-*.go`, `api-put-*.go`: GET and PUT operations + - `api-list.go`: Listing operations + - `api-stat.go`: Status/info operations + +2. **Credential Management**: The `pkg/credentials/` package provides various credential providers: + + - Static credentials + - Environment variables (AWS/MinIO) + - IAM roles + - STS (Security Token Service) variants + - File-based credentials + - Chain provider for fallback mechanisms + +3. **Request Signing**: The `pkg/signer/` package handles AWS signature versions: + + - V2 signatures (legacy) + - V4 signatures (standard) + - Streaming signatures for large uploads + +4. **Transport Layer**: Custom HTTP transport with: + + - Retry logic with configurable max retries + - Health status monitoring + - Tracing support via httptrace + - Bucket location caching (`bucketLocCache`\) + - Session caching for credentials + +5. **Helper Packages**: + + - `pkg/encrypt/`: Server-side encryption utilities + - `pkg/notification/`: Event notification handling + - `pkg/policy/`: Bucket policy management + - `pkg/lifecycle/`: Object lifecycle rules + - `pkg/tags/`: Object and bucket tagging + - `pkg/s3utils/`: S3 utility functions + - `pkg/kvcache/`: Key-value caching + - `pkg/singleflight/`: Deduplication of concurrent requests + +### Testing Strategy + +- Unit tests alongside implementation files (`*_test.go`\) +- Comprehensive functional tests in `functional_tests.go` requiring a live MinIO server +- Example programs in `examples/` directory demonstrating API usage +- Build tag `//go:build mint` for integration tests + +### Error Handling + +- Custom error types in `api-error-response.go` +- HTTP status code mapping +- Retry logic for transient failures +- Detailed error context preservation + +Important Patterns +------------------ + +1. **Context Usage**: All API methods accept `context.Context` for cancellation and timeout control +2. **Options Pattern**: Methods use Options structs for optional parameters (e.g., `PutObjectOptions`, `GetObjectOptions`\) +3. **Streaming Support**: Large file operations use io.Reader/Writer interfaces for memory efficiency +4. **Bucket Lookup Types**: Supports both path-style and virtual-host-style S3 URLs +5. **MD5/SHA256 Hashing**: Configurable hash functions for integrity checks via `md5Hasher` and `sha256Hasher` diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME new file mode 100644 index 000000000000..d365a7bb29c9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CNAME @@ -0,0 +1 @@ +minio-go.min.io \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md new file mode 100644 index 000000000000..e976dd6befc2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md @@ -0,0 +1,23 @@ +### Developer Guidelines + +`minio-go` welcomes your contribution. To make the process as seamless as possible, we ask for the following: + +- Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. + + - Fork it + - Create your feature branch (git checkout -b my-new-feature) + - Commit your changes (git commit -am 'Add some feature') + - Push to the branch (git push origin my-new-feature) + - Create new Pull Request + +- When you're ready to create a pull request, be sure to: + + - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. + - Run `go fmt` + - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. + - Make sure `go test -race ./...` and `go build` completes. NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables`ACCESS_KEY` and `SECRET_KEY`. To run shorter version of the tests please use `go test -short -race ./...` + +- Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project + + - `minio-go` project is strictly conformant with Golang style + - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/v7/CREDITS b/vendor/github.com/minio/minio-go/v7/CREDITS new file mode 100644 index 000000000000..154c9fd58db7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CREDITS @@ -0,0 +1,1101 @@ +Go (the standard library) +https://golang.org/ +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/davecgh/go-spew +https://github.com/davecgh/go-spew +---------------------------------------------------------------- +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +================================================================ + +github.com/dustin/go-humanize +https://github.com/dustin/go-humanize +---------------------------------------------------------------- +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +================================================================ + +github.com/goccy/go-json +https://github.com/goccy/go-json +---------------------------------------------------------------- +MIT License + +Copyright (c) 2020 Masaaki Goshima + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/google/uuid +https://github.com/google/uuid +---------------------------------------------------------------- +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/klauspost/compress +https://github.com/klauspost/compress +---------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +================================================================ + +github.com/klauspost/cpuid/v2 +https://github.com/klauspost/cpuid/v2 +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +================================================================ + +github.com/minio/md5-simd +https://github.com/minio/md5-simd +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/pmezard/go-difflib +https://github.com/pmezard/go-difflib +---------------------------------------------------------------- +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/rs/xid +https://github.com/rs/xid +---------------------------------------------------------------- +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +================================================================ + +github.com/stretchr/testify +https://github.com/stretchr/testify +---------------------------------------------------------------- +MIT License + +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +golang.org/x/crypto +https://golang.org/x/crypto +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/net +https://golang.org/x/net +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/sys +https://golang.org/x/sys +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/text +https://golang.org/x/text +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +gopkg.in/ini.v1 +https://gopkg.in/ini.v1 +---------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/minio/minio-go/v7/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt rename to vendor/github.com/minio/minio-go/v7/LICENSE diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md new file mode 100644 index 000000000000..9b189373f779 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md @@ -0,0 +1,43 @@ +For maintainers only +==================== + +Responsibilities +---------------- + +Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) + +### Making new releases + +Tag and sign your release commit, additionally this step requires you to have access to MinIO's trusted private key. + +```sh +$ export GNUPGHOME=/media/${USER}/minio/trusted +$ git tag -s 4.0.0 +$ git push +$ git push --tags +``` + +### Update version + +Once release has been made update `libraryVersion` constant in `api.go` to next to be released version. + +```sh +$ grep libraryVersion api.go + libraryVersion = "4.0.1" +``` + +Commit your changes + +``` +$ git commit -a -m "Update version for next release" --author "MinIO Trusted " +``` + +### Announce + +Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@min.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release. + +To generate `changelog` + +```sh +$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' .. +``` diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile new file mode 100644 index 000000000000..9e4ddc4c88ad --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -0,0 +1,42 @@ +GOPATH := $(shell go env GOPATH) +TMPDIR := $(shell mktemp -d) + +all: checks + +.PHONY: examples docs + +checks: lint vet test examples functional-test + +lint: + @mkdir -p ${GOPATH}/bin + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin + @echo "Running $@ check" + @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean + @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml + +vet: + @GO111MODULE=on go vet ./... + @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest + ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005" + +test: + @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... + +examples: + @echo "Building s3 examples" + @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) + @echo "Building minio examples" + @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) + +functional-test: + @GO111MODULE=on go build -race functional_tests.go + @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests + +functional-test-notls: + @GO111MODULE=on go build -race functional_tests.go + @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=0 MINT_MODE=full ./functional_tests + +clean: + @echo "Cleaning up all the generated files" + @find . -name '*.test' | xargs rm -fv + @find . -name '*~' | xargs rm -fv diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE new file mode 100644 index 000000000000..1e8fd3b92a5d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/NOTICE @@ -0,0 +1,9 @@ +MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc. + +This product includes software developed at MinIO, Inc. +(https://min.io/). + +The MinIO project contains unmodified/modified subcomponents too with +separate copyright notices and license terms. Your use of the source +code for these subcomponents is subject to the terms and conditions +of Apache License Version 2.0 diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md new file mode 100644 index 000000000000..36c1004c9ac0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -0,0 +1,318 @@ +MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) +================================================================================================================================================================================================================================================================================================================================================================================================================== + +The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage. + +This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader. For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). + +These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html). + +Download from Github +-------------------- + +From your project directory: + +```sh +go get github.com/minio/minio-go/v7 +``` + +Initialize a MinIO Client Object +-------------------------------- + +The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage: + +| Parameter | Description | +|-------------------|------------------------------------------------------------| +| `endpoint` | URL to object storage service. | +| `_minio.Options_` | All the options such as credentials, custom transport etc. | + +```go +package main + +import ( + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func main() { + endpoint := "play.min.io" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true + + // Initialize minio client object. + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) + if err != nil { + log.Fatalln(err) + } + + log.Printf("%#v\n", minioClient) // minioClient is now set up +} +``` + +Example - File Uploader +----------------------- + +This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket. It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io). + +The `play` server runs the latest stable version of MinIO and may be used for testing and development. The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected. + +### FileUploader.go + +This example does the following: + +- Connects to the MinIO `play` server using the provided credentials. +- Creates a bucket named `testbucket`. +- Uploads a file named `testdata` from `/tmp`. +- Verifies the file was created using `mc ls`. + + ```go + // FileUploader.go MinIO example + package main + + import ( + "context" + "log" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + ) + + func main() { + ctx := context.Background() + endpoint := "play.min.io" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true + + // Initialize minio client object. + minioClient, err := minio.New(endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) + if err != nil { + log.Fatalln(err) + } + + // Make a new bucket called testbucket. + bucketName := "testbucket" + location := "us-east-1" + + err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + // Check to see if we already own this bucket (which happens if you run this twice) + exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) + if errBucketExists == nil && exists { + log.Printf("We already own %s\n", bucketName) + } else { + log.Fatalln(err) + } + } else { + log.Printf("Successfully created %s\n", bucketName) + } + + // Upload the test file + // Change the value of filePath if the file is in another location + objectName := "testdata" + filePath := "/tmp/testdata" + contentType := "application/octet-stream" + + // Upload the test file with FPutObject + info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) + if err != nil { + log.Fatalln(err) + } + + log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) + } + ``` + +**1. Create a test file containing data:** + +You can do this with `dd` on Linux or macOS systems: + +```sh +dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10 +``` + +or `fsutil` on Windows: + +```sh +fsutil file createnew "C:\Users\\Desktop\sample.txt" 20480 +``` + +**2. Run FileUploader with the following commands:** + +```sh +go mod init example/FileUploader +go get github.com/minio/minio-go/v7 +go get github.com/minio/minio-go/v7/pkg/credentials +go run FileUploader.go +``` + +The output resembles the following: + +```sh +2023/11/01 14:27:55 Successfully created testbucket +2023/11/01 14:27:55 Successfully uploaded testdata of size 20480 +``` + +**3. Verify the Uploaded File With `mc ls`:** + +```sh +mc ls play/testbucket +[2023-11-01 14:27:55 UTC] 20KiB STANDARD TestDataFile +``` + +API Reference +------------- + +The full API Reference is available here. + +- [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) + +### API Reference : Bucket Operations + +- [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +- [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +- [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +- [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +- [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +- [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) + +### API Reference : Bucket policy Operations + +- [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +- [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) + +### API Reference : Bucket notification Operations + +- [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +- [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +- [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +- [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) +- [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) + +### API Reference : File Object Operations + +- [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +- [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) + +### API Reference : Object Operations + +- [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +- [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +- [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +- [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +- [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +- [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +- [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +- [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +- [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) + +### API Reference : Presigned Operations + +- [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +- [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +- [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +- [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) + +### API Reference : Client custom settings + +- [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +- [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +- [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) + +Full Examples +------------- + +### Full Examples : Bucket Operations + +- [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) +- [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) +- [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +- [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +- [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +- [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +- [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) + +### Full Examples : Bucket policy Operations + +- [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +- [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +- [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) + +### Full Examples : Bucket lifecycle Operations + +- [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) +- [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) + +### Full Examples : Bucket encryption Operations + +- [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) +- [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) +- [removebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketencryption.go) + +### Full Examples : Bucket replication Operations + +- [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) +- [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) +- [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) + +### Full Examples : Bucket notification Operations + +- [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +- [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +- [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +- [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) +- [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) + +### Full Examples : File Object Operations + +- [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +- [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) + +### Full Examples : Object Operations + +- [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +- [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +- [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) +- [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) +- [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) +- [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) +- [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) + +### Full Examples : Encrypted Object Operations + +- [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) +- [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) +- [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) + +### Full Examples : Presigned Operations + +- [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) +- [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) +- [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) +- [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) + +Explore Further +--------------- + +- [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) +- [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) +- [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) + +Contribute +---------- + +[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) + +License +------- + +This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. diff --git a/vendor/github.com/minio/minio-go/v7/api-append-object.go b/vendor/github.com/minio/minio-go/v7/api-append-object.go new file mode 100644 index 000000000000..b1bddf986e11 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-append-object.go @@ -0,0 +1,230 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// AppendObjectOptions https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html +type AppendObjectOptions struct { + // Provide a progress reader to indicate the current append() progress. + Progress io.Reader + // ChunkSize indicates the maximum append() size, + // it is useful when you want to control how much data + // per append() you are interested in sending to server + // while keeping the input io.Reader of a longer length. + ChunkSize uint64 + // Aggressively disable sha256 payload, it is automatically + // turned-off for TLS supporting endpoints, useful in benchmarks + // where you are interested in the peak() numbers. + DisableContentSha256 bool + + customHeaders http.Header + checksumType ChecksumType +} + +// Header returns the custom header for AppendObject API +func (opts AppendObjectOptions) Header() (header http.Header) { + header = make(http.Header) + for k, v := range opts.customHeaders { + header[k] = v + } + return header +} + +func (opts *AppendObjectOptions) setWriteOffset(offset int64) { + if len(opts.customHeaders) == 0 { + opts.customHeaders = make(http.Header) + } + opts.customHeaders["x-amz-write-offset-bytes"] = []string{strconv.FormatInt(offset, 10)} +} + +func (opts *AppendObjectOptions) setChecksumParams(info ObjectInfo) { + if len(opts.customHeaders) == 0 { + opts.customHeaders = make(http.Header) + } + fullObject := info.ChecksumMode == ChecksumFullObjectMode.String() + switch { + case info.ChecksumCRC32 != "": + if fullObject { + opts.checksumType = ChecksumFullObjectCRC32 + } + case info.ChecksumCRC32C != "": + if fullObject { + opts.checksumType = ChecksumFullObjectCRC32C + } + case info.ChecksumCRC64NVME != "": + // CRC64NVME only has a full object variant + // so it does not carry any special full object + // modifier + opts.checksumType = ChecksumCRC64NVME + } +} + +func (opts AppendObjectOptions) validate(c *Client) (err error) { + if opts.ChunkSize > maxPartSize { + return errInvalidArgument("Append chunkSize cannot be larger than max part size allowed") + } + switch { + case !c.trailingHeaderSupport: + return errInvalidArgument("AppendObject() requires Client with TrailingHeaders enabled") + case c.overrideSignerType.IsV2(): + return errInvalidArgument("AppendObject() cannot be used with v2 signatures") + case s3utils.IsGoogleEndpoint(*c.endpointURL): + return errInvalidArgument("AppendObject() cannot be used with GCS endpoints") + } + + return nil +} + +// appendObjectDo - executes the append object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts AppendObjectOptions) (UploadInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Set headers. + customHeader := opts.Header() + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + streamSha256: !opts.DisableContentSha256, + } + + if opts.checksumType.IsSet() { + reqMetadata.addCrc = &opts.checksumType + reqMetadata.customHeader.Set(amzChecksumAlgo, opts.checksumType.String()) + if opts.checksumType.FullObjectRequested() { + reqMetadata.customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } + } + + // Execute PUT an objectName. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return UploadInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + h := resp.Header + + // When AppendObject() is used, S3 Express will return final object size as x-amz-object-size + if amzSize := h.Get("x-amz-object-size"); amzSize != "" { + size, err = strconv.ParseInt(amzSize, 10, 64) + if err != nil { + return UploadInfo{}, err + } + } + + return UploadInfo{ + Bucket: bucketName, + Key: objectName, + ETag: trimEtag(h.Get("ETag")), + Size: size, + + // Checksum values + ChecksumCRC32: h.Get(ChecksumCRC32.Key()), + ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()), + ChecksumSHA1: h.Get(ChecksumSHA1.Key()), + ChecksumSHA256: h.Get(ChecksumSHA256.Key()), + ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()), + ChecksumMode: h.Get(ChecksumFullObjectMode.Key()), + }, nil +} + +// AppendObject - S3 Express Zone https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html +func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, + opts AppendObjectOptions, +) (info UploadInfo, err error) { + if objectSize < 0 && opts.ChunkSize == 0 { + return UploadInfo{}, errors.New("object size must be provided when no chunk size is provided") + } + + if err = opts.validate(c); err != nil { + return UploadInfo{}, err + } + + oinfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{Checksum: true}) + if err != nil { + return UploadInfo{}, err + } + if oinfo.ChecksumMode != "" && oinfo.ChecksumMode != ChecksumFullObjectMode.String() { + return UploadInfo{}, fmt.Errorf("Append() is not allowed on objects that are not of FULL_OBJECT checksum type: %s", oinfo.ChecksumMode) + } + opts.setChecksumParams(oinfo) // set the appropriate checksum params based on the existing object checksum metadata. + opts.setWriteOffset(oinfo.Size) // First append must set the current object size as the offset. + + if opts.ChunkSize > 0 { + finalObjSize := int64(-1) + if objectSize > 0 { + finalObjSize = info.Size + objectSize + } + totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(finalObjSize, opts.ChunkSize) + if err != nil { + return UploadInfo{}, err + } + buf := make([]byte, partSize) + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Proceed to upload the part. + if partNumber == totalPartsCount { + partSize = lastPartSize + } + n, err := readFull(reader, buf) + if err != nil { + return info, err + } + if n != int(partSize) { + return info, io.ErrUnexpectedEOF + } + rd := newHook(bytes.NewReader(buf[:n]), opts.Progress) + uinfo, err := c.appendObjectDo(ctx, bucketName, objectName, rd, partSize, opts) + if err != nil { + return info, err + } + opts.setWriteOffset(uinfo.Size) + } + } + + rd := newHook(reader, opts.Progress) + return c.appendObjectDo(ctx, bucketName, objectName, rd, objectSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go new file mode 100644 index 000000000000..9967fe39e63b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go @@ -0,0 +1,151 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2024 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/cors" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketCors sets the Cross-Origin Resource Sharing (CORS) configuration for the bucket. +// If corsConfig is nil, the existing CORS configuration will be removed. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - corsConfig: CORS configuration to apply (nil to remove existing configuration) +// +// Returns an error if the operation fails. +func (c *Client) SetBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if corsConfig == nil { + return c.removeBucketCors(ctx, bucketName) + } + + return c.putBucketCors(ctx, bucketName, corsConfig) +} + +func (c *Client) putBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + corsStr, err := corsConfig.ToXML() + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(corsStr), + contentLength: int64(len(corsStr)), + contentMD5Base64: sumMD5Base64([]byte(corsStr)), + } + + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +func (c *Client) removeBucketCors(ctx context.Context, bucketName string) error { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// GetBucketCors retrieves the Cross-Origin Resource Sharing (CORS) configuration from the bucket. +// If no CORS configuration exists, returns nil with no error. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the CORS configuration or an error if the operation fails. +func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + bucketCors, err := c.getBucketCors(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == NoSuchCORSConfiguration { + return nil, nil + } + return nil, err + } + return bucketCors, nil +} + +func (c *Client) getBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, // TODO: needed? copied over from other example, but not spec'd in API. + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + + corsConfig, err := cors.ParseBucketCorsConfig(resp.Body) + if err != nil { + return nil, err + } + + return corsConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go new file mode 100644 index 000000000000..3ae9fe2792bb --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go @@ -0,0 +1,156 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/sse" +) + +// SetBucketEncryption sets the default encryption configuration on an existing bucket. +// The encryption configuration specifies the default encryption behavior for objects uploaded to the bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - config: Server-side encryption configuration to apply +// +// Returns an error if the operation fails or if config is nil. +func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if config == nil { + return errInvalidArgument("configuration cannot be empty") + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket default encryption configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketEncryption removes the default encryption configuration from a bucket. +// After removal, the bucket will no longer apply default encryption to new objects. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns an error if the operation fails. +func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // DELETE default encryption configuration on a bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// GetBucketEncryption retrieves the default encryption configuration from a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the bucket's encryption configuration or an error if the operation fails. +func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Execute GET on bucket to get the default encryption configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + encryptionConfig := &sse.Configuration{} + if err = xmlDecoder(resp.Body, encryptionConfig); err != nil { + return nil, err + } + + return encryptionConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go new file mode 100644 index 000000000000..fec5cece50af --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go @@ -0,0 +1,169 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/lifecycle" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketLifecycle set the lifecycle on an existing bucket. +func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If lifecycle is empty then delete it. + if config.Empty() { + return c.removeBucketLifecycle(ctx, bucketName) + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Save the updated lifecycle. + return c.putBucketLifecycle(ctx, bucketName, buf) +} + +// Saves a new bucket lifecycle. +func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Content-length is mandatory for put lifecycle request + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket lifecycle. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Remove lifecycle from a bucket. +func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketLifecycle fetch bucket lifecycle configuration +func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { + lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName) + return lc, err +} + +// GetBucketLifecycleWithInfo fetch bucket lifecycle configuration along with when it was last updated +func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, time.Time{}, err + } + + bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName) + if err != nil { + return nil, time.Time{}, err + } + + config := lifecycle.NewConfiguration() + if err = xml.Unmarshal(bucketLifecycle, config); err != nil { + return nil, time.Time{}, err + } + return config, updatedAt, nil +} + +// Request server for current bucket lifecycle. +func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + urlValues.Set("withUpdatedAt", "true") + + // Execute GET on bucket to get lifecycle. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, time.Time{}, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + lcBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, time.Time{}, err + } + + const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt" + var updatedAt time.Time + if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" { + updatedAt, err = time.Parse(iso8601DateFormat, timeStr) + if err != nil { + return nil, time.Time{}, err + } + } + + return lcBytes, updatedAt, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go new file mode 100644 index 000000000000..9e2a6776918e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -0,0 +1,252 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "encoding/xml" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. +func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("notification", "") + + notifBytes, err := xml.Marshal(&config) + if err != nil { + return err + } + + notifBuffer := bytes.NewReader(notifBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Base64: sumMD5Base64(notifBytes), + contentSHA256Hex: sum256Hex(notifBytes), + } + + // Execute PUT to upload a new bucket notification. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config +func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { + return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) +} + +// GetBucketNotification returns current bucket notification configuration +func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return notification.Configuration{}, err + } + return c.getBucketNotification(ctx, bucketName) +} + +// Request server for notification rules. +func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { + urlValues := make(url.Values) + urlValues.Set("notification", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return notification.Configuration{}, err + } + return processBucketNotificationResponse(bucketName, resp) +} + +// processes the GetNotification http response from the server. +func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) { + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + return notification.Configuration{}, errResponse + } + var bucketNotification notification.Configuration + err := xmlDecoder(resp.Body, &bucketNotification) + if err != nil { + return notification.Configuration{}, err + } + return bucketNotification, nil +} + +// ListenNotification listen for all events, this is a MinIO specific API +func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { + return c.ListenBucketNotification(ctx, "", prefix, suffix, events) +} + +// ListenBucketNotification listen for bucket events, this is a MinIO specific API +func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { + notificationInfoCh := make(chan notification.Info, 1) + const notificationCapacity = 4 * 1024 * 1024 + notificationEventBuffer := make([]byte, notificationCapacity) + // Only success, start a routine to start reading line by line. + go func(notificationInfoCh chan<- notification.Info) { + defer close(notificationInfoCh) + + // Validate the bucket name. + if bucketName != "" { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + } + + // Check ARN partition to verify if listening bucket is supported + if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { + select { + case notificationInfoCh <- notification.Info{ + Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), + }: + case <-ctx.Done(): + } + return + } + + // Prepare urlValues to pass into the request on every loop + urlValues := make(url.Values) + urlValues.Set("ping", "10") + urlValues.Set("prefix", prefix) + urlValues.Set("suffix", suffix) + urlValues["events"] = events + + // Wait on the jitter retry loop. + for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter) { + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + + // Validate http response, upon error return quickly. + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + select { + case notificationInfoCh <- notification.Info{ + Err: errResponse, + }: + case <-ctx.Done(): + } + return + } + + // Initialize a new bufio scanner, to read line by line. + bio := bufio.NewScanner(resp.Body) + + // Use a higher buffer to support unexpected + // caching done by proxies + bio.Buffer(notificationEventBuffer, notificationCapacity) + + // Unmarshal each line, returns marshaled values. + for bio.Scan() { + var notificationInfo notification.Info + if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { + // Unexpected error during json unmarshal, send + // the error to caller for actionable as needed. + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + closeResponse(resp) + continue + } + + // Empty events pinged from the server + if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil { + continue + } + + // Send notificationInfo + select { + case notificationInfoCh <- notificationInfo: + case <-ctx.Done(): + closeResponse(resp) + return + } + } + + if err = bio.Err(); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + return + } + } + + // Close current connection before looping further. + closeResponse(resp) + } + }(notificationInfoCh) + + // Returns the notification info channel, for caller to start reading from. + return notificationInfoCh +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go new file mode 100644 index 000000000000..0e561bdfab2d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go @@ -0,0 +1,163 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketPolicy sets the access permissions policy on an existing bucket. +// The policy should be a valid JSON string that conforms to the IAM policy format. +// If policy is an empty string, the existing bucket policy will be removed. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - policy: JSON policy string (empty string to remove existing policy) +// +// Returns an error if the operation fails. +func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If policy is empty then delete the bucket policy. + if policy == "" { + return c.removeBucketPolicy(ctx, bucketName) + } + + // Save the updated policies. + return c.putBucketPolicy(ctx, bucketName, policy) +} + +// Saves a new bucket policy. +func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: strings.NewReader(policy), + contentLength: int64(len(policy)), + } + + // Execute PUT to upload a new bucket policy. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Removes all policies on a bucket. +func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// GetBucketPolicy retrieves the access permissions policy for the bucket. +// If no bucket policy exists, returns an empty string with no error. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the policy as a JSON string or an error if the operation fails. +func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == NoSuchBucketPolicy { + return "", nil + } + return "", err + } + return bucketPolicy, nil +} + +// Request server for current bucket policy. +func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketPolicyBuf, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + + policy := string(bucketPolicyBuf) + return policy, err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go b/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go new file mode 100644 index 000000000000..d1493a5b9a4f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-qos.go @@ -0,0 +1,212 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2025 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "gopkg.in/yaml.v3" +) + +// QOSConfigVersionCurrent is the current version of the QoS configuration. +const QOSConfigVersionCurrent = "v1" + +// QOSConfig represents the QoS configuration for a bucket. +type QOSConfig struct { + Version string `yaml:"version"` + Rules []QOSRule `yaml:"rules"` +} + +// QOSRule represents a single QoS rule. +type QOSRule struct { + ID string `yaml:"id"` + Label string `yaml:"label,omitempty"` + Priority int `yaml:"priority"` + ObjectPrefix string `yaml:"objectPrefix"` + API string `yaml:"api"` + Rate int64 `yaml:"rate"` + Burst int64 `yaml:"burst"` // not required for concurrency limit + Limit string `yaml:"limit"` // "concurrency" or "rps" +} + +// NewQOSConfig creates a new empty QoS configuration. +func NewQOSConfig() *QOSConfig { + return &QOSConfig{ + Version: "v1", + Rules: []QOSRule{}, + } +} + +// GetBucketQOS retrieves the Quality of Service (QoS) configuration for the bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// +// Returns the QoS configuration or an error if the operation fails. +func (c *Client) GetBucketQOS(ctx context.Context, bucket string) (*QOSConfig, error) { + var qosCfg QOSConfig + // Input validation. + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + urlValues := make(url.Values) + urlValues.Set("qos", "") + // Execute GET on bucket to fetch qos. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucket, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err = yaml.Unmarshal(b, &qosCfg); err != nil { + return nil, err + } + + return &qosCfg, nil +} + +// SetBucketQOS sets the Quality of Service (QoS) configuration for a bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - qosCfg: QoS configuration to apply +// +// Returns an error if the operation fails. +func (c *Client) SetBucketQOS(ctx context.Context, bucket string, qosCfg *QOSConfig) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return err + } + + data, err := yaml.Marshal(qosCfg) + if err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("qos", "") + + reqMetadata := requestMetadata{ + bucketName: bucket, + queryValues: urlValues, + contentBody: strings.NewReader(string(data)), + contentLength: int64(len(data)), + } + + // Execute PUT to upload a new bucket QoS configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucket, "") + } + } + return nil +} + +// CounterMetric returns stats for a counter +type CounterMetric struct { + Last1m uint64 `json:"last1m"` + Last1hr uint64 `json:"last1hr"` + Total uint64 `json:"total"` +} + +// QOSMetric - metric for a qos rule per bucket +type QOSMetric struct { + APIName string `json:"apiName"` + Rule QOSRule `json:"rule"` + Totals CounterMetric `json:"totals"` + Throttled CounterMetric `json:"throttleCount"` + ExceededRateLimit CounterMetric `json:"exceededRateLimitCount"` + ClientDisconnCount CounterMetric `json:"clientDisconnectCount"` + ReqTimeoutCount CounterMetric `json:"reqTimeoutCount"` +} + +// QOSNodeStats represents stats for a bucket on a single node +type QOSNodeStats struct { + Stats []QOSMetric `json:"stats"` + NodeName string `json:"node"` +} + +// GetBucketQOSMetrics retrieves Quality of Service (QoS) metrics for a bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - nodeName: Name of the node (empty string for all nodes) +// +// Returns QoS metrics per node or an error if the operation fails. +func (c *Client) GetBucketQOSMetrics(ctx context.Context, bucketName, nodeName string) (qs []QOSNodeStats, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return qs, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("qos-metrics", "") + if nodeName != "" { + urlValues.Set("node", nodeName) + } + // Execute GET on bucket to get qos metrics. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return qs, err + } + + if resp.StatusCode != http.StatusOK { + return qs, httpRespToErrorResponse(resp, bucketName, "") + } + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return qs, err + } + + if err := json.Unmarshal(respBytes, &qs); err != nil { + return qs, err + } + return qs, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go new file mode 100644 index 000000000000..6dd7ae8934ca --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -0,0 +1,458 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "io" + "net/http" + "net/url" + "time" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/replication" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RemoveBucketReplication removes the replication configuration from an existing bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns an error if the operation fails. +func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { + return c.removeBucketReplication(ctx, bucketName) +} + +// SetBucketReplication sets the replication configuration on an existing bucket. +// If the provided configuration is empty, this method removes the existing replication configuration. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - cfg: Replication configuration to apply +// +// Returns an error if the operation fails. +func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If replication is empty then delete it. + if cfg.Empty() { + return c.removeBucketReplication(ctx, bucketName) + } + // Save the updated replication. + return c.putBucketReplication(ctx, bucketName, cfg) +} + +// Saves a new bucket replication. +func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + replication, err := xml.Marshal(cfg) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(replication), + contentLength: int64(len(replication)), + contentMD5Base64: sumMD5Base64(replication), + } + + // Execute PUT to upload a new bucket replication config. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// Remove replication from a bucket. +func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// GetBucketReplication retrieves the bucket replication configuration. +// If no replication configuration is found, returns an empty config with nil error. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the replication configuration or an error if the operation fails. +func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return cfg, err + } + bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "ReplicationConfigurationNotFoundError" { + return cfg, nil + } + return cfg, err + } + return bucketReplicationCfg, nil +} + +// Request server for current bucket replication config. +func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return cfg, err + } + + if resp.StatusCode != http.StatusOK { + return cfg, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = xmlDecoder(resp.Body, &cfg); err != nil { + return cfg, err + } + + return cfg, nil +} + +// GetBucketReplicationMetrics retrieves bucket replication status metrics. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the replication metrics or an error if the operation fails. +func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return s, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-metrics", "") + + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return s, err + } + + if resp.StatusCode != http.StatusOK { + return s, httpRespToErrorResponse(resp, bucketName, "") + } + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return s, err + } + + if err := json.Unmarshal(respBytes, &s); err != nil { + return s, err + } + return s, nil +} + +// mustGetUUID - get a random UUID. +func mustGetUUID() string { + u, err := uuid.NewRandom() + if err != nil { + return "" + } + return u.String() +} + +// ResetBucketReplication initiates replication of previously replicated objects. +// This requires ExistingObjectReplication to be enabled in the replication configuration. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - olderThan: Only replicate objects older than this duration (0 for all objects) +// +// Returns a reset ID that can be used to track the operation, or an error if the operation fails. +func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) { + rID = mustGetUUID() + _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID) + if err != nil { + return rID, err + } + return rID, nil +} + +// ResetBucketReplicationOnTarget initiates replication of previously replicated objects to a specific target. +// This requires ExistingObjectReplication to be enabled in the replication configuration. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - olderThan: Only replicate objects older than this duration (0 for all objects) +// - tgtArn: ARN of the target to reset replication for +// +// Returns resync target information or an error if the operation fails. +func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) { + return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID()) +} + +// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication +// is enabled in the replication config +func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return rinfo, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-reset", "") + if olderThan > 0 { + urlValues.Set("older-than", olderThan.String()) + } + if tgtArn != "" { + urlValues.Set("arn", tgtArn) + } + urlValues.Set("reset-id", resetID) + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return rinfo, err + } + + if resp.StatusCode != http.StatusOK { + return rinfo, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil { + return rinfo, err + } + return rinfo, nil +} + +// GetBucketReplicationResyncStatus retrieves the status of a replication resync operation. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - arn: ARN of the replication target (empty string for all targets) +// +// Returns resync status information or an error if the operation fails. +func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return rinfo, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-reset-status", "") + if arn != "" { + urlValues.Set("arn", arn) + } + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return rinfo, err + } + + if resp.StatusCode != http.StatusOK { + return rinfo, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil { + return rinfo, err + } + return rinfo, nil +} + +// CancelBucketReplicationResync cancels an in-progress replication resync operation. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - tgtArn: ARN of the replication target (empty string for all targets) +// +// Returns the ID of the canceled resync operation or an error if the operation fails. +func (c *Client) CancelBucketReplicationResync(ctx context.Context, bucketName string, tgtArn string) (id string, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return id, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-reset-cancel", "") + if tgtArn != "" { + urlValues.Set("arn", tgtArn) + } + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return id, err + } + + if resp.StatusCode != http.StatusOK { + return id, httpRespToErrorResponse(resp, bucketName, "") + } + strBuf, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + + id = string(strBuf) + return id, nil +} + +// GetBucketReplicationMetricsV2 retrieves bucket replication status metrics using the V2 API. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the V2 replication metrics or an error if the operation fails. +func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return s, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-metrics", "2") + + // Execute GET on bucket to get replication metrics. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return s, err + } + + if resp.StatusCode != http.StatusOK { + return s, httpRespToErrorResponse(resp, bucketName, "") + } + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return s, err + } + + if err := json.Unmarshal(respBytes, &s); err != nil { + return s, err + } + return s, nil +} + +// CheckBucketReplication validates whether replication is properly configured for a bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns nil if replication is valid, or an error describing the validation failure. +func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-check", "") + + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go new file mode 100644 index 000000000000..921f90f999bc --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go @@ -0,0 +1,153 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "io" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// GetBucketTagging fetches the tagging configuration for a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns the bucket's tags or an error if the operation fails. +func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute GET on bucket to get tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + defer io.Copy(io.Discard, resp.Body) + return tags.ParseBucketXML(resp.Body) +} + +// SetBucketTagging sets the tagging configuration for a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - tags: Tag set to apply to the bucket +// +// Returns an error if the operation fails or if tags is nil. +func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if tags == nil { + return errors.New("nil tags passed") + } + + buf, err := xml.Marshal(tags) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT on bucket to put tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketTagging removes the tagging configuration from a bucket. +// It uses the provided context to control cancellations and timeouts. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// +// Returns an error if the operation fails. +func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute DELETE on bucket to remove tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go new file mode 100644 index 000000000000..045e3c38ec6f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go @@ -0,0 +1,147 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketVersioning sets a bucket versioning configuration +func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + contentSHA256Hex: sum256Hex(buf), + } + + // Execute PUT to set a bucket versioning. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// EnableVersioning - enable object versioning in given bucket. +func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) +} + +// SuspendVersioning - suspend object versioning in given bucket. +func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) +} + +// ExcludedPrefix - holds individual prefixes excluded from being versioned. +type ExcludedPrefix struct { + Prefix string +} + +// BucketVersioningConfiguration is the versioning configuration structure +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status string `xml:"Status"` + MFADelete string `xml:"MfaDelete,omitempty"` + // MinIO extension - allows selective, prefix-level versioning exclusion. + // Requires versioning to be enabled + ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"` + ExcludeFolders bool `xml:",omitempty"` + PurgeOnDelete string `xml:",omitempty"` +} + +// Various supported states +const ( + Enabled = "Enabled" + // Disabled State = "Disabled" only used by MFA Delete not supported yet. + Suspended = "Suspended" +) + +// Enabled returns true if bucket versioning is enabled +func (b BucketVersioningConfiguration) Enabled() bool { + return b.Status == Enabled +} + +// Suspended returns true if bucket versioning is suspended +func (b BucketVersioningConfiguration) Suspended() bool { + return b.Status == Suspended +} + +// GetBucketVersioning gets the versioning configuration on +// an existing bucket with a context to control cancellations and timeouts. +func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return BucketVersioningConfiguration{}, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + // Execute GET on bucket to get the versioning configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return BucketVersioningConfiguration{}, err + } + + if resp.StatusCode != http.StatusOK { + return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "") + } + + versioningConfig := BucketVersioningConfiguration{} + if err = xmlDecoder(resp.Body, &versioningConfig); err != nil { + return versioningConfig, err + } + + return versioningConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go new file mode 100644 index 000000000000..232bd2c01d02 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -0,0 +1,626 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs +type CopyDestOptions struct { + Bucket string // points to destination bucket + Object string // points to destination object + + // `Encryption` is the key info for server-side-encryption with customer + // provided key. If it is nil, no encryption is performed. + Encryption encrypt.ServerSide + + ChecksumType ChecksumType + + // `userMeta` is the user-metadata key-value pairs to be set on the + // destination. The keys are automatically prefixed with `x-amz-meta-` + // if needed. If nil is passed, and if only a single source (of any + // size) is provided in the ComposeObject call, then metadata from the + // source is copied to the destination. + // if no user-metadata is provided, it is copied from source + // (when there is only one source object in the compose + // request) + UserMetadata map[string]string + // UserMetadata is only set to destination if ReplaceMetadata is true + // other value is UserMetadata is ignored and we preserve src.UserMetadata + // NOTE: if you set this value to true and now metadata is present + // in UserMetadata your destination object will not have any metadata + // set. + ReplaceMetadata bool + + // `userTags` is the user defined object tags to be set on destination. + // This will be set only if the `replaceTags` field is set to true. + // Otherwise this field is ignored + UserTags map[string]string + ReplaceTags bool + + // Specifies whether you want to apply a Legal Hold to the copied object. + LegalHold LegalHoldStatus + + // Object Retention related fields + Mode RetentionMode + RetainUntilDate time.Time + Expires time.Time + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + + Size int64 // Needs to be specified if progress bar is specified. + // Progress of the entire copy operation will be sent here. + Progress io.Reader +} + +// Process custom-metadata to remove a `x-amz-meta-` prefix if +// present and validate that keys are distinct (after this +// prefix removal). +func filterCustomMeta(userMeta map[string]string) map[string]string { + m := make(map[string]string) + for k, v := range userMeta { + if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { + k = k[len("x-amz-meta-"):] + } + if _, ok := m[k]; ok { + continue + } + m[k] = v + } + return m +} + +// Marshal converts all the CopyDestOptions into their +// equivalent HTTP header representation +func (opts CopyDestOptions) Marshal(header http.Header) { + const replaceDirective = "REPLACE" + if opts.ReplaceTags { + header.Set(amzTaggingHeaderDirective, replaceDirective) + if tags, _ := tags.NewTags(opts.UserTags, true); tags != nil { + header.Set(amzTaggingHeader, tags.String()) + } + } + + if opts.LegalHold != LegalHoldStatus("") { + header.Set(amzLegalHoldHeader, opts.LegalHold.String()) + } + + if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() { + header.Set(amzLockMode, opts.Mode.String()) + header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339)) + } + + if opts.Encryption != nil { + opts.Encryption.Marshal(header) + } + if opts.ContentType != "" { + header.Set("Content-Type", opts.ContentType) + } + if opts.ContentEncoding != "" { + header.Set("Content-Encoding", opts.ContentEncoding) + } + if opts.ContentDisposition != "" { + header.Set("Content-Disposition", opts.ContentDisposition) + } + if opts.ContentLanguage != "" { + header.Set("Content-Language", opts.ContentLanguage) + } + if opts.CacheControl != "" { + header.Set("Cache-Control", opts.CacheControl) + } + if !opts.Expires.IsZero() { + header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat)) + } + if opts.ChecksumType.IsSet() { + header.Set(amzChecksumAlgo, opts.ChecksumType.String()) + } + + if opts.ReplaceMetadata { + header.Set("x-amz-metadata-directive", replaceDirective) + for k, v := range filterCustomMeta(opts.UserMetadata) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { + header.Set(k, v) + } else { + header.Set("x-amz-meta-"+k, v) + } + } + } +} + +// toDestinationInfo returns a validated copyOptions object. +func (opts CopyDestOptions) validate() (err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { + return err + } + if err = s3utils.CheckValidObjectName(opts.Object); err != nil { + return err + } + if opts.Progress != nil && opts.Size < 0 { + return errInvalidArgument("For progress bar effective size needs to be specified") + } + return nil +} + +// CopySrcOptions represents a source object to be copied, using +// server-side copying APIs. +type CopySrcOptions struct { + Bucket, Object string + VersionID string + MatchETag string + NoMatchETag string + MatchModifiedSince time.Time + MatchUnmodifiedSince time.Time + MatchRange bool + Start, End int64 + Encryption encrypt.ServerSide +} + +// Marshal converts all the CopySrcOptions into their +// equivalent HTTP header representation +func (opts CopySrcOptions) Marshal(header http.Header) { + // Set the source header + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)) + if opts.VersionID != "" { + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID) + } + + if opts.MatchETag != "" { + header.Set("x-amz-copy-source-if-match", opts.MatchETag) + } + if opts.NoMatchETag != "" { + header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag) + } + + if !opts.MatchModifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat)) + } + if !opts.MatchUnmodifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat)) + } + + if opts.Encryption != nil { + encrypt.SSECopy(opts.Encryption).Marshal(header) + } +} + +func (opts CopySrcOptions) validate() (err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { + return err + } + if err = s3utils.CheckValidObjectName(opts.Object); err != nil { + return err + } + if opts.Start > opts.End || opts.Start < 0 { + return errInvalidArgument("start must be non-negative, and start must be at most end.") + } + return nil +} + +// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. +func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, + metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions, +) (ObjectInfo, error) { + // Build headers. + headers := make(http.Header) + + // Set all the metadata headers. + for k, v := range metadata { + headers.Set(k, v) + } + if !dstOpts.Internal.ReplicationStatus.Empty() { + headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus)) + } + if !dstOpts.Internal.SourceMTime.IsZero() { + headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano)) + } + if dstOpts.Internal.SourceETag != "" { + headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) + } + if dstOpts.Internal.ReplicationRequest { + headers.Set(minIOBucketReplicationRequest, "true") + } + if dstOpts.Internal.ReplicationValidityCheck { + headers.Set(minIOBucketReplicationCheck, "true") + } + if !dstOpts.Internal.LegalholdTimestamp.IsZero() { + headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) + } + if !dstOpts.Internal.RetentionTimestamp.IsZero() { + headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) + } + if !dstOpts.Internal.TaggingTimestamp.IsZero() { + headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) + } + + if len(dstOpts.UserTags) != 0 { + if tags, _ := tags.NewTags(dstOpts.UserTags, true); tags != nil { + headers.Set(amzTaggingHeader, tags.String()) + } + } + + reqMetadata := requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + } + if dstOpts.Internal.SourceVersionID != "" { + if dstOpts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { + return ObjectInfo{}, errInvalidArgument(err.Error()) + } + } + urlValues := make(url.Values) + urlValues.Set("versionId", dstOpts.Internal.SourceVersionID) + reqMetadata.queryValues = urlValues + } + + // Set the source header + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + if srcOpts.VersionID != "" { + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID) + } + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) + } + + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return ObjectInfo{}, err + } + + objInfo := ObjectInfo{ + Key: destObject, + ETag: strings.Trim(cpObjRes.ETag, "\""), + LastModified: cpObjRes.LastModified, + } + return objInfo, nil +} + +func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, + partID int, startOffset, length int64, metadata map[string]string, +) (p CompletePart, err error) { + headers := make(http.Header) + + // Set source + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + + if startOffset < 0 { + return p, errInvalidArgument("startOffset must be non-negative") + } + + if length >= 0 { + headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) + } + + for k, v := range metadata { + headers.Set(k, v) + } + + queryValues := make(url.Values) + queryValues.Set("partNumber", strconv.Itoa(partID)) + queryValues.Set("uploadId", uploadID) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + queryValues: queryValues, + }) + defer closeResponse(resp) + if err != nil { + return p, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, destBucket, destObject) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partID, cpObjRes.ETag + return p, nil +} + +// uploadPartCopy - helper function to create a part in a multipart +// upload via an upload-part-copy request +// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html +func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, + headers http.Header, +) (p CompletePart, err error) { + // Build query parameters + urlValues := make(url.Values) + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("uploadId", uploadID) + + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: bucket, + objectName: object, + customHeader: headers, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return p, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, bucket, object) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partNumber, cpObjRes.ETag + return p, nil +} + +// ComposeObject - creates an object using server-side copying +// of existing objects. It takes a list of source objects (with optional offsets) +// and concatenates them into a new object using only server-side copying +// operations. Optionally takes progress reader hook for applications to +// look at current progress. +func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { + if len(srcs) < 1 || len(srcs) > maxPartsCount { + return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.") + } + + for _, src := range srcs { + if err := src.validate(); err != nil { + return UploadInfo{}, err + } + } + + if err := dst.validate(); err != nil { + return UploadInfo{}, err + } + + srcObjectInfos := make([]ObjectInfo, len(srcs)) + srcObjectSizes := make([]int64, len(srcs)) + var totalSize, totalParts int64 + var err error + for i, src := range srcs { + opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID} + srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts) + if err != nil { + return UploadInfo{}, err + } + + srcCopySize := srcObjectInfos[i].Size + // Check if a segment is specified, and if so, is the + // segment within object bounds? + if src.MatchRange { + // Since range is specified, + // 0 <= src.start <= src.end + // so only invalid case to check is: + if src.End >= srcCopySize || src.Start < 0 { + return UploadInfo{}, errInvalidArgument( + fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)", + i, src.Start, src.End, srcCopySize)) + } + srcCopySize = src.End - src.Start + 1 + } + + // Only the last source may be less than `absMinPartSize` + if srcCopySize < absMinPartSize && i < len(srcs)-1 { + return UploadInfo{}, errInvalidArgument( + fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize)) + } + + // Is data to copy too large? + totalSize += srcCopySize + if totalSize > maxMultipartPutObjectSize { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) + } + + // record source size + srcObjectSizes[i] = srcCopySize + + // calculate parts needed for current source + totalParts += partsRequired(srcCopySize) + // Do we need more parts than we are allowed? + if totalParts > maxPartsCount { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf( + "Your proposed compose object requires more than %d parts", maxPartsCount)) + } + } + + // Single source object case (i.e. when only one source is + // involved, it is being copied wholly and at most 5GiB in + // size, emptyfiles are also supported). + if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { + return c.CopyObject(ctx, dst, srcs[0]) + } + + // Now, handle multipart-copy cases. + + // 1. Ensure that the object has not been changed while + // we are copying data. + for i, src := range srcs { + src.MatchETag = srcObjectInfos[i].ETag + } + + // 2. Initiate a new multipart upload. + + // Set user-metadata on the destination object. If no + // user-metadata is specified, and there is only one source, + // (only) then metadata from source is copied. + var userMeta map[string]string + if dst.ReplaceMetadata { + userMeta = dst.UserMetadata + } else { + userMeta = srcObjectInfos[0].UserMetadata + } + + var userTags map[string]string + if dst.ReplaceTags { + userTags = dst.UserTags + } else { + userTags = srcObjectInfos[0].UserTags + } + + uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{ + ServerSideEncryption: dst.Encryption, + UserMetadata: userMeta, + UserTags: userTags, + Mode: dst.Mode, + RetainUntilDate: dst.RetainUntilDate, + LegalHold: dst.LegalHold, + }) + if err != nil { + return UploadInfo{}, err + } + + // 3. Perform copy part uploads + objParts := []CompletePart{} + partIndex := 1 + for i, src := range srcs { + h := make(http.Header) + src.Marshal(h) + if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC { + dst.Encryption.Marshal(h) + } + + // calculate start/end indices of parts after + // splitting. + startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src) + for j, start := range startIdx { + end := endIdx[j] + + // Add (or reset) source range header for + // upload part copy request. + h.Set("x-amz-copy-source-range", + fmt.Sprintf("bytes=%d-%d", start, end)) + + // make upload-part-copy request + complPart, err := c.uploadPartCopy(ctx, dst.Bucket, + dst.Object, uploadID, partIndex, h) + if err != nil { + return UploadInfo{}, err + } + if dst.Progress != nil { + io.CopyN(io.Discard, dst.Progress, end-start+1) + } + objParts = append(objParts, complPart) + partIndex++ + } + } + + // 4. Make final complete-multipart request. + uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, + completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption}) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalSize + return uploadInfo, nil +} + +// partsRequired is maximum parts possible with +// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)) +func partsRequired(size int64) int64 { + maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) + r := size / int64(maxPartSize) + if size%int64(maxPartSize) > 0 { + r++ + } + return r +} + +// calculateEvenSplits - computes splits for a source and returns +// start and end index slices. Splits happen evenly to be sure that no +// part is less than 5MiB, as that could fail the multipart request if +// it is not the last part. +func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { + if size == 0 { + return startIndex, endIndex + } + + reqParts := partsRequired(size) + startIndex = make([]int64, reqParts) + endIndex = make([]int64, reqParts) + // Compute number of required parts `k`, as: + // + // k = ceiling(size / copyPartSize) + // + // Now, distribute the `size` bytes in the source into + // k parts as evenly as possible: + // + // r parts sized (q+1) bytes, and + // (k - r) parts sized q bytes, where + // + // size = q * k + r (by simple division of size by k, + // so that 0 <= r < k) + // + start := src.Start + if start == -1 { + start = 0 + } + quot, rem := size/reqParts, size%reqParts + nextStart := start + for j := int64(0); j < reqParts; j++ { + curPartSize := quot + if j < rem { + curPartSize++ + } + + cStart := nextStart + cEnd := cStart + curPartSize - 1 + nextStart = cEnd + 1 + + startIndex[j], endIndex[j] = cStart, cEnd + } + return startIndex, endIndex +} diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go new file mode 100644 index 000000000000..b6cadc86a929 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -0,0 +1,76 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "net/http" +) + +// CopyObject - copy a source object into a new object +func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { + if err := src.validate(); err != nil { + return UploadInfo{}, err + } + + if err := dst.validate(); err != nil { + return UploadInfo{}, err + } + + header := make(http.Header) + dst.Marshal(header) + src.Marshal(header) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: dst.Bucket, + objectName: dst.Object, + customHeader: header, + }) + if err != nil { + return UploadInfo{}, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) + } + + // Update the progress properly after successful copy. + if dst.Progress != nil { + io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size)) + } + + cpObjRes := copyObjectResult{} + if err = xmlDecoder(resp.Body, &cpObjRes); err != nil { + return UploadInfo{}, err + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: dst.Bucket, + Key: dst.Object, + LastModified: cpObjRes.LastModified, + ETag: trimEtag(cpObjRes.ETag), + VersionID: resp.Header.Get(amzVersionID), + Expiration: expTime, + ExpirationRuleID: ruleID, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go new file mode 100644 index 000000000000..56af1687080b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -0,0 +1,262 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// BucketInfo container for bucket metadata. +type BucketInfo struct { + // The name of the bucket. + Name string `json:"name"` + // Date the bucket was created. + CreationDate time.Time `json:"creationDate"` + // BucketRegion region where the bucket is present + BucketRegion string `json:"bucketRegion"` +} + +// StringMap represents map with custom UnmarshalXML +type StringMap map[string]string + +// UnmarshalXML unmarshals the XML into a map of string to strings, +// creating a key in the map for each tag and setting it's value to the +// tags contents. +// +// The fact this function is on the pointer of Map is important, so that +// if m is nil it can be initialized, which is often the case if m is +// nested in another xml structural. This is also why the first thing done +// on the first line is initialize it. +func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error { + *m = StringMap{} + for { + // Format is value + var e struct { + XMLName xml.Name + Value string `xml:",chardata"` + } + err := d.Decode(&e) + if err == io.EOF { + break + } + if err != nil { + return err + } + (*m)[e.XMLName.Local] = e.Value + } + return nil +} + +// URLMap represents map with custom UnmarshalXML +type URLMap map[string]string + +// UnmarshalXML unmarshals the XML into a map of string to strings, +// creating a key in the map for each tag and setting it's value to the +// tags contents. +// +// The fact this function is on the pointer of Map is important, so that +// if m is nil it can be initialized, which is often the case if m is +// nested in another xml structural. This is also why the first thing done +// on the first line is initialize it. +func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error { + *m = URLMap{} + var tgs string + if err := d.DecodeElement(&tgs, &se); err != nil { + if err == io.EOF { + return nil + } + return err + } + for tgs != "" { + var key string + key, tgs, _ = stringsCut(tgs, "&") + if key == "" { + continue + } + key, value, _ := stringsCut(key, "=") + key, err := url.QueryUnescape(key) + if err != nil { + return err + } + + value, err = url.QueryUnescape(value) + if err != nil { + return err + } + (*m)[key] = value + } + return nil +} + +// stringsCut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. +func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +// Owner name. +type Owner struct { + XMLName xml.Name `xml:"Owner" json:"owner"` + DisplayName string `xml:"ID" json:"name"` + ID string `xml:"DisplayName" json:"id"` +} + +// UploadInfo contains information about the +// newly uploaded or copied object. +type UploadInfo struct { + Bucket string + Key string + ETag string + Size int64 + LastModified time.Time + Location string + VersionID string + + // Lifecycle expiry-date and ruleID associated with the expiry + // not to be confused with `Expires` HTTP header. + Expiration time.Time + ExpirationRuleID string + + // Verified checksum values, if any. + // Values are base64 (standard) encoded. + // For multipart objects this is a checksum of the checksum of each part. + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string + ChecksumMode string +} + +// RestoreInfo contains information of the restore operation of an archived object +type RestoreInfo struct { + // Is the restoring operation is still ongoing + OngoingRestore bool + // When the restored copy of the archived object will be removed + ExpiryTime time.Time +} + +// ObjectInfo container for object metadata. +type ObjectInfo struct { + // An ETag is optionally set to md5sum of an object. In case of multipart objects, + // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of + // each parts concatenated into one string. + ETag string `json:"etag"` + + Key string `json:"name"` // Name of the object + LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. + Size int64 `json:"size"` // Size in bytes of the object. + ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached. + + // Collection of additional metadata on the object. + // eg: x-amz-meta-*, content-encoding etc. + Metadata http.Header `json:"metadata" xml:"-"` + + // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. + // Only returned by MinIO servers. + UserMetadata StringMap `json:"userMetadata,omitempty"` + + // x-amz-tagging values in their k/v values. + // Only returned by MinIO servers. + UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"` + + // x-amz-tagging-count value + UserTagCount int + + // Owner name. + Owner Owner + + // ACL grant. + Grant []Grant + + // The class of storage used to store the object. + StorageClass string `json:"storageClass"` + + // Versioning related information + IsLatest bool + IsDeleteMarker bool + VersionID string `xml:"VersionId"` + + // x-amz-replication-status value is either in one of the following states + // - COMPLETED + // - PENDING + // - FAILED + // - REPLICA (on the destination) + ReplicationStatus string `xml:"ReplicationStatus"` + // set to true if delete marker has backing object version on target, and eligible to replicate + ReplicationReady bool + // Lifecycle expiry-date and ruleID associated with the expiry + // not to be confused with `Expires` HTTP header. + Expiration time.Time + ExpirationRuleID string + // NumVersions is the number of versions of the object. + NumVersions int + + Restore *RestoreInfo + + // Checksum values + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string + ChecksumMode string + + Internal *struct { + K int // Data blocks + M int // Parity blocks + } `xml:"Internal"` + + // Error + Err error `json:"-"` +} + +// ObjectMultipartInfo container for multipart object metadata. +type ObjectMultipartInfo struct { + // Date and time at which the multipart upload was initiated. + Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Initiator initiator + Owner owner + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass string + + // Key of the object for which the multipart upload was initiated. + Key string + + // Size in bytes of the object. + Size int64 + + // Upload ID that identifies the multipart upload. + UploadID string `xml:"UploadId"` + + // Error + Err error +} diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go new file mode 100644 index 000000000000..e5f88d98e193 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -0,0 +1,295 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "strings" +) + +/* **** SAMPLE ERROR RESPONSE **** + + + AccessDenied + Access Denied + bucketName + objectName + F19772218238A85A + GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD + +*/ + +// ErrorResponse - Is the typed error returned by all API operations. +// ErrorResponse struct should be comparable since it is compared inside +// golang http API (https://github.com/golang/go/issues/29768) +type ErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + BucketName string + Key string + Resource string + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. + Region string + + // Captures the server string returned in response header. + Server string + + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` +} + +// ToErrorResponse - Returns parsed ErrorResponse struct from body and +// http headers. +// +// For example: +// +// import s3 "github.com/minio/minio-go/v7" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... +func ToErrorResponse(err error) ErrorResponse { + switch err := err.(type) { + case ErrorResponse: + return err + default: + return ErrorResponse{} + } +} + +// Error - Returns S3 error string. +func (e ErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } + return e.Message +} + +// Common string for errors to report issue location in unexpected +// cases. +const ( + reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." +) + +// xmlDecodeAndBody reads the whole body up to 1MB and +// tries to XML decode it into v. +// The body that was read and any error from reading or decoding is returned. +func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { + // read the whole body (up to 1MB) + const maxBodyLength = 1 << 20 + body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + if err != nil { + return nil, err + } + return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) +} + +// httpRespToErrorResponse returns a new encoded ErrorResponse +// structure as error. +func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { + if resp == nil { + msg := "Empty http response. " + reportIssue + return errInvalidArgument(msg) + } + + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + Server: resp.Header.Get("Server"), + } + + _, success := successStatus[resp.StatusCode] + + errBody, err := xmlDecodeAndBody(resp.Body, &errResp) + // Xml decoding failed with no body, fall back to HTTP headers. + if err != nil { + var unmarshalErr xml.UnmarshalError + if success && errors.As(err, &unmarshalErr) { + // This is a successful message so not an error response + // return nil, + return nil + } + + switch resp.StatusCode { + case http.StatusNotFound: + if objectName == "" { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: NoSuchBucket, + Message: s3ErrorResponseMap[NoSuchBucket], + BucketName: bucketName, + } + } else { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: NoSuchKey, + Message: s3ErrorResponseMap[NoSuchKey], + BucketName: bucketName, + Key: objectName, + } + } + case http.StatusForbidden: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: AccessDenied, + Message: s3ErrorResponseMap[AccessDenied], + BucketName: bucketName, + Key: objectName, + } + case http.StatusConflict: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: Conflict, + Message: s3ErrorResponseMap[Conflict], + BucketName: bucketName, + } + case http.StatusPreconditionFailed: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: PreconditionFailed, + Message: s3ErrorResponseMap[PreconditionFailed], + BucketName: bucketName, + Key: objectName, + } + default: + msg := resp.Status + if len(errBody) > 0 { + msg = string(errBody) + if len(msg) > 1024 { + msg = msg[:1024] + "..." + } + } + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: resp.Status, + Message: msg, + BucketName: bucketName, + } + } + } + + code := resp.Header.Get("x-minio-error-code") + if code != "" { + errResp.Code = code + } + desc := resp.Header.Get("x-minio-error-desc") + if desc != "" { + errResp.Message = strings.Trim(desc, `"`) + } + + // Save hostID, requestID and region information + // from headers if not available through error XML. + if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") + } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + if errResp.Code == InvalidRegion && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) + } + + return errResp +} + +// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func errTransferAccelerationBucket(bucketName string) error { + msg := "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’." + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: InvalidArgument, + Message: msg, + BucketName: bucketName, + } +} + +// errEntityTooLarge - Input size is larger than supported maximum. +func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: EntityTooLarge, + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errEntityTooSmall - Input size is smaller than supported minimum. +func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: EntityTooSmall, + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errUnexpectedEOF - Unexpected end of file reached. +func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: UnexpectedEOF, + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errInvalidArgument - Invalid argument response. +func errInvalidArgument(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: InvalidArgument, + Message: message, + RequestID: "minio", + } +} + +// errAPINotSupported - API not supported response +// The specified API call is not supported +func errAPINotSupported(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotImplemented, + Code: APINotSupported, + Message: message, + RequestID: "minio", + } +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go new file mode 100644 index 000000000000..5864f0260d05 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -0,0 +1,152 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/xml" + "net/http" + "net/url" +) + +// Grantee represents the person being granted permissions. +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` +} + +// Grant holds grant information +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee + Permission string `xml:"Permission"` +} + +// AccessControlList contains the set of grantees and the permissions assigned to each grantee. +type AccessControlList struct { + XMLName xml.Name `xml:"AccessControlList"` + Grant []Grant + Permission string `xml:"Permission"` +} + +type accessControlPolicy struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner + AccessControlList AccessControlList +} + +// GetObjectACL get object ACLs +func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: url.Values{ + "acl": []string{""}, + }, + }) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + res := &accessControlPolicy{} + + if err := xmlDecoder(resp.Body, res); err != nil { + return nil, err + } + + objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{}) + if err != nil { + return nil, err + } + + objInfo.Owner.DisplayName = res.Owner.DisplayName + objInfo.Owner.ID = res.Owner.ID + + objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) + + cannedACL := getCannedACL(res) + if cannedACL != "" { + objInfo.Metadata.Add("X-Amz-Acl", cannedACL) + return &objInfo, nil + } + + grantACL := getAmzGrantACL(res) + for k, v := range grantACL { + objInfo.Metadata[k] = v + } + + return &objInfo, nil +} + +func getCannedACL(aCPolicy *accessControlPolicy) string { + grants := aCPolicy.AccessControlList.Grant + + switch { + case len(grants) == 1: + if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { + return "private" + } + case len(grants) == 2: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + return "authenticated-read" + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { + return "public-read" + } + if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { + return "bucket-owner-read" + } + } + case len(grants) == 3: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + return "public-read-write" + } + } + } + return "" +} + +func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { + grants := aCPolicy.AccessControlList.Grant + res := map[string][]string{} + + for _, g := range grants { + switch g.Permission { + case "READ": + res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) + case "WRITE": + res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) + case "READ_ACP": + res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) + case "WRITE_ACP": + res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) + case "FULL_CONTROL": + res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) + } + } + return res +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go new file mode 100644 index 000000000000..d2e8cabded9a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go @@ -0,0 +1,201 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/xml" + "errors" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// ObjectAttributesOptions are options used for the GetObjectAttributes API +// +// - MaxParts +// How many parts the caller wants to be returned (default: 1000) +// +// - VersionID +// The object version you want to attributes for +// +// - PartNumberMarker +// the listing will start AFTER the part matching PartNumberMarker +// +// - ServerSideEncryption +// The server-side encryption algorithm used when storing this object in Minio +type ObjectAttributesOptions struct { + MaxParts int + VersionID string + PartNumberMarker int + ServerSideEncryption encrypt.ServerSide +} + +// ObjectAttributes is the response object returned by the GetObjectAttributes API +// +// - VersionID +// The object version +// +// - LastModified +// The last time the object was modified +// +// - ObjectAttributesResponse +// Contains more information about the object +type ObjectAttributes struct { + VersionID string + LastModified time.Time + ObjectAttributesResponse +} + +// ObjectAttributesResponse contains details returned by the GetObjectAttributes API +// +// Noteworthy fields: +// +// - ObjectParts.PartsCount +// Contains the total part count for the object (not the current response) +// +// - ObjectParts.PartNumberMarker +// Pagination of parts will begin at (but not include) PartNumberMarker +// +// - ObjectParts.NextPartNumberMarket +// The next PartNumberMarker to be used in order to continue pagination +// +// - ObjectParts.IsTruncated +// Indicates if the last part is included in the request (does not check if parts are missing from the start of the list, ONLY the end) +// +// - ObjectParts.MaxParts +// Reflects the MaxParts used by the caller or the default MaxParts value of the API +type ObjectAttributesResponse struct { + ETag string `xml:",omitempty"` + StorageClass string + ObjectSize int + Checksum struct { + ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + } + ObjectParts struct { + PartsCount int + PartNumberMarker int + NextPartNumberMarker int + MaxParts int + IsTruncated bool + Parts []*ObjectAttributePart `xml:"Part"` + } +} + +// ObjectAttributePart is used by ObjectAttributesResponse to describe an object part +type ObjectAttributePart struct { + ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + PartNumber int + Size int +} + +func (o *ObjectAttributes) parseResponse(resp *http.Response) (err error) { + mod, err := parseRFC7231Time(resp.Header.Get("Last-Modified")) + if err != nil { + return err + } + o.LastModified = mod + o.VersionID = resp.Header.Get(amzVersionID) + + response := new(ObjectAttributesResponse) + if err := xml.NewDecoder(resp.Body).Decode(response); err != nil { + return err + } + o.ObjectAttributesResponse = *response + + return err +} + +// GetObjectAttributes API combines HeadObject and ListParts. +// More details on usage can be found in the documentation for ObjectAttributesOptions{} +func (c *Client) GetObjectAttributes(ctx context.Context, bucketName, objectName string, opts ObjectAttributesOptions) (*ObjectAttributes, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Add("attributes", "") + if opts.VersionID != "" { + urlValues.Add("versionId", opts.VersionID) + } + + headers := make(http.Header) + headers.Set(amzObjectAttributes, GetObjectAttributesTags) + + if opts.PartNumberMarker > 0 { + headers.Set(amzPartNumberMarker, strconv.Itoa(opts.PartNumberMarker)) + } + + if opts.MaxParts > 0 { + headers.Set(amzMaxParts, strconv.Itoa(opts.MaxParts)) + } else { + headers.Set(amzMaxParts, strconv.Itoa(GetObjectAttributesMaxParts)) + } + + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(headers) + } + + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + if err != nil { + return nil, err + } + + defer closeResponse(resp) + + hasEtag := resp.Header.Get(ETag) + if hasEtag != "" { + return nil, errors.New("getObjectAttributes is not supported by the current endpoint version") + } + + if resp.StatusCode != http.StatusOK { + ER := new(ErrorResponse) + if err := xml.NewDecoder(resp.Body).Decode(ER); err != nil { + return nil, err + } + + return nil, *ER + } + + OA := new(ObjectAttributes) + err = OA.parseResponse(resp) + if err != nil { + return nil, err + } + + return OA, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go new file mode 100644 index 000000000000..6ef9c9330eef --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go @@ -0,0 +1,127 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// FGetObject - download contents of an object to a local file. +// The options can be used to specify the GET request further. +func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Verify if destination already exists. + st, err := os.Stat(filePath) + if err == nil { + // If the destination exists and is a directory. + if st.IsDir() { + return errInvalidArgument("fileName is a directory.") + } + } + + // Proceed if file does not exist. return for all other errors. + if err != nil { + if !os.IsNotExist(err) { + return err + } + } + + // Extract top level directory. + objectDir, _ := filepath.Split(filePath) + if objectDir != "" { + // Create any missing top level directories. + if err := os.MkdirAll(objectDir, 0o700); err != nil { + return err + } + } + + // Gather md5sum. + objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + return err + } + + // Write to a temporary file "fileName.part.minio" before saving. + filePartPath := filepath.Join(filepath.Dir(filePath), sum256Hex([]byte(filepath.Base(filePath)+objectStat.ETag))+".part.minio") + + // If exists, open in append mode. If not create it as a part file. + filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600) + if err != nil { + return err + } + + // If we return early with an error, be sure to close and delete + // filePart. If we have an error along the way there is a chance + // that filePart is somehow damaged, and we should discard it. + closeAndRemove := true + defer func() { + if closeAndRemove { + _ = filePart.Close() + _ = os.Remove(filePartPath) + } + }() + + // Issue Stat to get the current offset. + st, err = filePart.Stat() + if err != nil { + return err + } + + // Initialize get object request headers to set the + // appropriate range offsets to read from. + if st.Size() > 0 { + opts.SetRange(st.Size(), 0) + } + + // Seek to current position for incoming reader. + objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + return err + } + + // Write to the part file. + if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { + return err + } + + // Close the file before rename, this is specifically needed for Windows users. + closeAndRemove = false + if err = filePart.Close(); err != nil { + return err + } + + // Safely completed. Now commit by renaming to actual filename. + if err = os.Rename(filePartPath, filePath); err != nil { + return err + } + + // Return. + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go new file mode 100644 index 000000000000..d3cb6c22a05c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -0,0 +1,699 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "sync" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// GetObject wrapper function that accepts a request context +func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: InvalidBucketName, + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: XMinioInvalidObjectName, + Message: err.Error(), + } + } + + gctx, cancel := context.WithCancel(ctx) + + // Detect if snowball is server location we are talking to. + var snowball bool + if location, ok := c.bucketLocCache.Get(bucketName); ok { + snowball = location == "snowball" + } + + var ( + err error + httpReader io.ReadCloser + objectInfo ObjectInfo + totalRead int + ) + + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(resCh) + defer func() { + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + }() + defer cancel() + + // Used to verify if etag of object has changed since last read. + var etag string + + for req := range reqCh { + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{Error: err} + return + } + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. + size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(size) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: size, + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. + // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { // Range is set with respect to the offset. + opts.SetRange(req.Offset, 0) + } else { + // Remove range header if already set + delete(opts.headers, "Range") + } + httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + totalRead = 0 + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. + size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(totalRead) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + + // Reply back how much was read. + resCh <- getResponse{ + Size: size, + Error: err, + didRead: true, + objectInfo: objectInfo, + } + } + } + }() + + // Create a newObject through the information sent back by reqCh. + return newObject(gctx, cancel, reqCh, resCh), nil +} + +// get request message container to communicate with internal +// go-routine. +type getRequest struct { + Buffer []byte + Offset int64 // readAt offset. + DidOffsetChange bool // Tracks the offset changes for Seek requests. + beenRead bool // Determines if this is the first time an object is being read. + isReadAt bool // Determines if this request is a request to a specific range + isReadOp bool // Determines if this request is a Read or Read/At request. + isFirstReq bool // Determines if this request is the first time an object is being accessed. + settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. +} + +// get response message container to reply back for the request. +type getResponse struct { + Size int + Error error + didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. + objectInfo ObjectInfo // Used for the first request. +} + +// Object represents an open object. It implements +// Reader, ReaderAt, Seeker, Closer for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- getRequest + resCh <-chan getResponse + ctx context.Context + cancel context.CancelFunc + currOffset int64 + objectInfo ObjectInfo + + // Ask lower level to initiate data fetching based on currOffset + seekData bool + + // Keeps track of closed call. + isClosed bool + + // Keeps track of if this is the first call. + isStarted bool + + // Previous error saved for future calls. + prevErr error + + // Keeps track of if this object has been read yet. + beenRead bool + + // Keeps track of if objectInfo has been set yet. + objectInfoSet bool +} + +// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. +// Returns back the size of the buffer read, if anything was read, as well +// as any error encountered. For all first requests sent on the object +// it is also responsible for sending back the objectInfo. +func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + select { + case <-o.ctx.Done(): + return getResponse{}, o.ctx.Err() + case o.reqCh <- request: + } + + response := <-o.resCh + + // Return any error to the top level. + if response.Error != nil && response.Error != io.EOF { + return response, response.Error + } + + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. + if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Data are ready on the wire, no need to reinitiate connection in lower level + o.seekData = false + + return response, response.Error +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(b)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readReq.isFirstReq = true + } + + // Ask to establish a new data fetch routine based on seekData flag + readReq.DidOffsetChange = o.seekData + readReq.Offset = o.currOffset + + // Send and receive from the first request. + response, err := o.doGetRequest(readReq) + if err != nil && err != io.EOF { + // Save the error for future calls. + o.prevErr = err + return response.Size, err + } + + // Bytes read. + bytesRead := int64(response.Size) + + // Set the new offset. + oerr := o.setOffset(bytesRead) + if oerr != nil { + // Save the error for future calls. + o.prevErr = oerr + return response.Size, oerr + } + + // Return the response. + return response.Size, err +} + +// Stat returns the ObjectInfo structure describing Object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, errInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return ObjectInfo{}, o.prevErr + } + + // This is the first request. + if !o.isStarted || !o.objectInfoSet { + // Send the request and get the response. + _, err := o.doGetRequest(getRequest{ + isFirstReq: !o.isStarted, + settingObjectInfo: !o.objectInfoSet, + }) + if err != nil { + o.prevErr = err + return ObjectInfo{}, err + } + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is error which was saved in previous operation. + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return 0, o.prevErr + } + + // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method. + o.currOffset = offset + + // Can only compare offsets to size when size has been set. + if o.objectInfoSet { + // If offset is negative than we return io.EOF. + // If offset is greater than or equal to object size we return io.EOF. + if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { + return 0, io.EOF + } + } + + // Create the new readAt request. + readAtReq := getRequest{ + isReadOp: true, + isReadAt: true, + DidOffsetChange: true, // Offset always changes. + beenRead: o.beenRead, // Set if this is the first request to try and read. + Offset: offset, // Set the offset. + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readAtReq.isFirstReq = true + } + + // Send and receive from the first request. + response, err := o.doGetRequest(readAtReq) + if err != nil && err != io.EOF { + // Save the error. + o.prevErr = err + return response.Size, err + } + // Bytes read. + bytesRead := int64(response.Size) + // There is no valid objectInfo yet + // to compare against for EOF. + if !o.objectInfoSet { + // Update the currentOffset. + o.currOffset += bytesRead + } else { + // If this was not the first request update + // the offsets and compare against objectInfo + // for EOF. + oerr := o.setOffset(bytesRead) + if oerr != nil { + o.prevErr = oerr + return response.Size, oerr + } + } + return response.Size, err +} + +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the +// origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. +// Seek returns the new offset and an error, if any. +// +// Seeking to a negative offset is an error. Seeking to any positive +// offset is legal, subsequent io operations succeed until the +// underlying object is not closed. +func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // At EOF seeking is legal allow only io.EOF, for any other errors we return. + if o.prevErr != nil && o.prevErr != io.EOF { + return 0, o.prevErr + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) + } + + // This is the first request. So before anything else + // get the ObjectInfo. + if !o.isStarted || !o.objectInfoSet { + // Create the new Seek request. + seekReq := getRequest{ + isReadOp: false, + Offset: offset, + isFirstReq: true, + } + // Send and receive from the seek request. + _, err := o.doGetRequest(seekReq) + if err != nil { + // Save the error. + o.prevErr = err + return 0, err + } + } + + newOffset := o.currOffset + + // Switch through whence. + switch whence { + default: + return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + case 0: + if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { + return 0, io.EOF + } + newOffset = offset + case 1: + if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + newOffset += offset + case 2: + // If we don't know the object size return an error for io.SeekEnd + if o.objectInfo.Size < 0 { + return 0, errInvalidArgument("Whence END is not supported when the object size is unknown") + } + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. + if o.objectInfo.Size+offset < 0 { + return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + } + newOffset = o.objectInfo.Size + offset + } + // Reset the saved error since we successfully seeked, let the Read + // and ReadAt decide. + if o.prevErr == io.EOF { + o.prevErr = nil + } + + // Ask lower level to fetch again from source when necessary + o.seekData = (newOffset != o.currOffset) || o.seekData + o.currOffset = newOffset + + // Return the effective offset. + return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // if already closed return an error. + if o.isClosed { + return o.prevErr + } + + // Close successfully. + o.cancel() + + // Close the request channel to indicate the internal go-routine to exit. + close(o.reqCh) + + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true + return nil +} + +// newObject instantiates a new *minio.Object* +// ObjectInfo will be set by setObjectInfo +func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getRequest, resCh <-chan getResponse) *Object { + return &Object{ + ctx: ctx, + cancel: cancel, + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + } +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { + // Validate input arguments. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ObjectInfo{}, nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: InvalidBucketName, + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ObjectInfo{}, nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: XMinioInvalidObjectName, + Message: err.Error(), + } + } + + // Execute GET on objectName. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: opts.toQueryValues(), + customHeader: opts.Header(), + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + return nil, ObjectInfo{}, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header) + if err != nil { + closeResponse(resp) + return nil, ObjectInfo{}, nil, err + } + + // do not close body here, caller will close + return resp.Body, objectStat, resp.Header, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go new file mode 100644 index 000000000000..a0216e2018bf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -0,0 +1,203 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" +) + +// AdvancedGetOptions for internal use by MinIO server - not intended for client use. +type AdvancedGetOptions struct { + ReplicationDeleteMarker bool + IsReplicationReadyForDeleteMarker bool + ReplicationProxyRequest string +} + +// GetObjectOptions are used to specify additional headers or options +// during GET requests. +type GetObjectOptions struct { + headers map[string]string + reqParams url.Values + ServerSideEncryption encrypt.ServerSide + VersionID string + PartNumber int + + // Include any checksums, if object was uploaded with checksum. + // For multipart objects this is a checksum of part checksums. + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + Checksum bool + + // To be not used by external applications + Internal AdvancedGetOptions +} + +// StatObjectOptions are used to specify additional headers or options +// during GET info/stat requests. +type StatObjectOptions = GetObjectOptions + +// Header returns the http.Header representation of the GET options. +func (o GetObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + // this header is set for active-active replication scenario where GET/HEAD + // to site A is proxy'd to site B if object/version missing on site A. + if o.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest) + } + if o.Checksum { + headers.Set("x-amz-checksum-mode", "ENABLED") + } + return headers +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *GetObjectOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// SetReqParam - set request query string parameter +// supported key: see supportedQueryValues and allowedCustomQueryPrefix. +// If an unsupported key is passed in, it will be ignored and nothing will be done. +func (o *GetObjectOptions) SetReqParam(key, value string) { + if !isCustomQueryValue(key) && !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Set(key, value) +} + +// AddReqParam - add request query string parameter +// supported key: see supportedQueryValues and allowedCustomQueryPrefix. +// If an unsupported key is passed in, it will be ignored and nothing will be done. +func (o *GetObjectOptions) AddReqParam(key, value string) { + if !isCustomQueryValue(key) && !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Add(key, value) +} + +// SetMatchETag - set match etag. +func (o *GetObjectOptions) SetMatchETag(etag string) error { + if etag == "" { + return errInvalidArgument("ETag cannot be empty.") + } + o.Set("If-Match", "\""+etag+"\"") + return nil +} + +// SetMatchETagExcept - set match etag except. +func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { + if etag == "" { + return errInvalidArgument("ETag cannot be empty.") + } + o.Set("If-None-Match", "\""+etag+"\"") + return nil +} + +// SetUnmodified - set unmodified time since. +func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { + if modTime.IsZero() { + return errInvalidArgument("Modified since cannot be empty.") + } + o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetModified - set modified time since. +func (o *GetObjectOptions) SetModified(modTime time.Time) error { + if modTime.IsZero() { + return errInvalidArgument("Modified since cannot be empty.") + } + o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetRange - set the start and end offset of the object to be read. +// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. +func (o *GetObjectOptions) SetRange(start, end int64) error { + switch { + case start == 0 && end < 0: + // Read last '-end' bytes. `bytes=-N`. + o.Set("Range", fmt.Sprintf("bytes=%d", end)) + case 0 < start && end == 0: + // Read everything starting from offset + // 'start'. `bytes=N-`. + o.Set("Range", fmt.Sprintf("bytes=%d-", start)) + case 0 <= start && start <= end: + // Read everything starting at 'start' till the + // 'end'. `bytes=N-M` + o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + default: + // All other cases such as + // bytes=-3- + // bytes=5-3 + // bytes=-2-4 + // bytes=-3-0 + // bytes=-3--2 + // are invalid. + return errInvalidArgument( + fmt.Sprintf( + "Invalid range specified: start=%d end=%d", + start, end)) + } + return nil +} + +// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters. +func (o *GetObjectOptions) toQueryValues() url.Values { + urlValues := make(url.Values) + if o.VersionID != "" { + urlValues.Set("versionId", o.VersionID) + } + if o.PartNumber > 0 { + urlValues.Set("partNumber", strconv.Itoa(o.PartNumber)) + } + + if o.reqParams != nil { + for key, values := range o.reqParams { + for _, value := range values { + urlValues.Add(key, value) + } + } + } + + return urlValues +} diff --git a/vendor/github.com/minio/minio-go/v7/api-inventory-ext.go b/vendor/github.com/minio/minio-go/v7/api-inventory-ext.go new file mode 100644 index 000000000000..498300785fcf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-inventory-ext.go @@ -0,0 +1,332 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/json" + "io" + "iter" + "net/http" + "net/url" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// This file contains the inventory API extension for MinIO server. It is not +// compatible with AWS S3. + +func makeInventoryReqMetadata(bucket string, urlParams ...string) requestMetadata { + urlValues := make(url.Values) + urlValues.Set("minio-inventory", "") + + // If an odd number of parameters is given, we skip the last pair to avoid + // an out of bounds access. + for i := 0; i+1 < len(urlParams); i += 2 { + urlValues.Set(urlParams[i], urlParams[i+1]) + } + + return requestMetadata{ + bucketName: bucket, + queryValues: urlValues, + } +} + +// GenerateInventoryConfigYAML generates a YAML template for an inventory configuration. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration +// +// Returns a YAML template string that can be customized and used with PutBucketInventoryConfiguration. +func (c *Client) GenerateInventoryConfigYAML(ctx context.Context, bucket, id string) (string, error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return "", err + } + if id == "" { + return "", errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "generate", "", "id", id) + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucket, "") + } + buf := new(strings.Builder) + _, err = io.Copy(buf, resp.Body) + return buf.String(), err +} + +// inventoryPutConfigOpts is a placeholder for future options that may be added. +type inventoryPutConfigOpts struct{} + +// InventoryPutConfigOption is to allow for functional options for +// PutBucketInventoryConfiguration. It may be used in the future to customize +// the PutBucketInventoryConfiguration request, but currently does not do +// anything. +type InventoryPutConfigOption func(*inventoryPutConfigOpts) + +// PutBucketInventoryConfiguration creates or updates an inventory configuration for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration +// - yamlDef: YAML definition of the inventory configuration +// +// Returns an error if the operation fails, or if bucket name, id, or yamlDef is empty. +func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, bucket string, id string, yamlDef string, _ ...InventoryPutConfigOption) error { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return err + } + if id == "" { + return errInvalidArgument("inventory ID cannot be empty") + } + if yamlDef == "" { + return errInvalidArgument("YAML definition cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id) + reqMeta.contentBody = strings.NewReader(yamlDef) + reqMeta.contentLength = int64(len(yamlDef)) + reqMeta.contentMD5Base64 = sumMD5Base64([]byte(yamlDef)) + + resp, err := c.executeMethod(ctx, http.MethodPut, reqMeta) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucket, "") + } + return nil +} + +// GetBucketInventoryConfiguration retrieves the inventory configuration for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration +// +// Returns the inventory configuration or an error if the operation fails or if the configuration doesn't exist. +func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, bucket, id string) (*InventoryConfiguration, error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + if id == "" { + return nil, errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id) + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + decoder := json.NewDecoder(resp.Body) + var ic InventoryConfiguration + err = decoder.Decode(&ic) + if err != nil { + return nil, err + } + return &ic, nil +} + +// DeleteBucketInventoryConfiguration deletes an inventory configuration from a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory configuration to delete +// +// Returns an error if the operation fails or if the configuration doesn't exist. +func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, bucket, id string) error { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return err + } + if id == "" { + return errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id) + resp, err := c.executeMethod(ctx, http.MethodDelete, reqMeta) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucket, "") + } + return nil +} + +// InventoryConfiguration represents the inventory configuration +type InventoryConfiguration struct { + Bucket string `json:"bucket"` + ID string `json:"id"` + User string `json:"user"` + YamlDef string `json:"yamlDef,omitempty"` +} + +// InventoryListResult represents the result of listing inventory +// configurations. +type InventoryListResult struct { + Items []InventoryConfiguration `json:"items"` + NextContinuationToken string `json:"nextContinuationToken,omitempty"` +} + +// ListBucketInventoryConfigurations lists up to 100 inventory configurations for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - continuationToken: Token for pagination (empty string for first request) +// +// Returns a list result with configurations and a continuation token for the next page, or an error. +func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, bucket, continuationToken string) (lr *InventoryListResult, err error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + reqMeta := makeInventoryReqMetadata(bucket, "continuation-token", continuationToken) + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&lr) + if err != nil { + return nil, err + } + return lr, nil +} + +// ListBucketInventoryConfigurationsIterator returns an iterator that lists all inventory configurations +// for a bucket. This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// +// Returns an iterator that yields InventoryConfiguration values and errors. The iterator automatically +// handles pagination and fetches all configurations. +func (c *Client) ListBucketInventoryConfigurationsIterator(ctx context.Context, bucket string) iter.Seq2[InventoryConfiguration, error] { + return func(yield func(InventoryConfiguration, error) bool) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + yield(InventoryConfiguration{}, err) + return + } + var continuationToken string + for { + listResult, err := c.ListBucketInventoryConfigurations(ctx, bucket, continuationToken) + if err != nil { + yield(InventoryConfiguration{}, err) + return + } + + for _, item := range listResult.Items { + if !yield(item, nil) { + return + } + } + + if listResult.NextContinuationToken == "" { + return + } + continuationToken = listResult.NextContinuationToken + } + } +} + +// InventoryJobStatus represents the status of an inventory job. +type InventoryJobStatus struct { + Bucket string `json:"bucket"` + ID string `json:"id"` + User string `json:"user"` + AccessKey string `json:"accessKey"` + Schedule string `json:"schedule"` + State string `json:"state"` + NextScheduledTime time.Time `json:"nextScheduledTime,omitempty"` + StartTime time.Time `json:"startTime,omitempty"` + EndTime time.Time `json:"endTime,omitempty"` + LastUpdate time.Time `json:"lastUpdate,omitempty"` + Scanned string `json:"scanned,omitempty"` + Matched string `json:"matched,omitempty"` + ScannedCount uint64 `json:"scannedCount,omitempty"` + MatchedCount uint64 `json:"matchedCount,omitempty"` + RecordsWritten uint64 `json:"recordsWritten,omitempty"` + OutputFilesCount uint64 `json:"outputFilesCount,omitempty"` + ExecutionTime string `json:"executionTime,omitempty"` + NumStarts uint64 `json:"numStarts,omitempty"` + NumErrors uint64 `json:"numErrors,omitempty"` + NumLockLosses uint64 `json:"numLockLosses,omitempty"` + ManifestPath string `json:"manifestPath,omitempty"` + RetryAttempts uint64 `json:"retryAttempts,omitempty"` + LastFailTime time.Time `json:"lastFailTime,omitempty"` + LastFailErrors []string `json:"lastFailErrors,omitempty"` +} + +// GetBucketInventoryJobStatus retrieves the status of an inventory job for a bucket. +// This is a MinIO-specific API and is not compatible with AWS S3. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucket: Name of the bucket +// - id: Unique identifier for the inventory job +// +// Returns the inventory job status including execution state, progress, and error information, or an error if the operation fails. +func (c *Client) GetBucketInventoryJobStatus(ctx context.Context, bucket, id string) (*InventoryJobStatus, error) { + if err := s3utils.CheckValidBucketName(bucket); err != nil { + return nil, err + } + if id == "" { + return nil, errInvalidArgument("inventory ID cannot be empty") + } + reqMeta := makeInventoryReqMetadata(bucket, "id", id, "status", "") + resp, err := c.executeMethod(ctx, http.MethodGet, reqMeta) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "") + } + decoder := json.NewDecoder(resp.Body) + var jStatus InventoryJobStatus + err = decoder.Decode(&jStatus) + if err != nil { + return nil, err + } + return &jStatus, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go new file mode 100644 index 000000000000..5bf67a66607f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -0,0 +1,1136 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "fmt" + "iter" + "net/http" + "net/url" + "slices" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// ListBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } +func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { + // Execute GET on service. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +// ListDirectoryBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// dirBuckets, err := api.ListDirectoryBuckets(context.Background()) +func (c *Client) ListDirectoryBuckets(ctx context.Context) (iter.Seq2[BucketInfo, error], error) { + fetchBuckets := func(continuationToken string) ([]BucketInfo, string, error) { + metadata := requestMetadata{contentSHA256Hex: emptySHA256Hex} + metadata.queryValues = url.Values{} + metadata.queryValues.Set("max-directory-buckets", "1000") + if continuationToken != "" { + metadata.queryValues.Set("continuation-token", continuationToken) + } + + // Execute GET on service. + resp, err := c.executeMethod(ctx, http.MethodGet, metadata) + defer closeResponse(resp) + if err != nil { + return nil, "", err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, "", httpRespToErrorResponse(resp, "", "") + } + } + + results := listAllMyDirectoryBucketsResult{} + if err = xmlDecoder(resp.Body, &results); err != nil { + return nil, "", err + } + + return results.Buckets.Bucket, results.ContinuationToken, nil + } + + return func(yield func(BucketInfo, error) bool) { + var continuationToken string + for { + buckets, token, err := fetchBuckets(continuationToken) + if err != nil { + yield(BucketInfo{}, err) + return + } + for _, bucket := range buckets { + if !yield(bucket, nil) { + return + } + } + if token == "" { + // nothing to continue + return + } + continuationToken = token + } + }, nil +} + +// Bucket List Operations. +func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) iter.Seq[ObjectInfo] { + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Return object owner information by default + fetchOwner := true + + return func(yield func(ObjectInfo) bool) { + if contextCanceled(ctx) { + return + } + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + yield(ObjectInfo{Err: err}) + return + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + yield(ObjectInfo{Err: err}) + return + } + + // Save continuationToken for next request. + var continuationToken string + for { + if contextCanceled(ctx) { + return + } + + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, + fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) + if err != nil { + yield(ObjectInfo{Err: err}) + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + object.ETag = trimEtag(object.ETag) + if !yield(object) { + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + if !yield(ObjectInfo{Key: obj.Prefix}) { + return + } + } + + // If continuation token present, save it for next request. + if result.NextContinuationToken != "" { + continuationToken = result.NextContinuationToken + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + + // Add this to catch broken S3 API implementations. + if continuationToken == "" { + if !yield(ObjectInfo{ + Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is buggy", c.endpointURL), + }) { + return + } + } + } + } +} + +// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?continuation-token - Used to continue iterating over a set of objects +// ?metadata - Specifies if we want metadata for the objects as part of list operation. +// ?delimiter - A delimiter is a character you use to group keys. +// ?start-after - Sets a marker to start listing lexically at this key onwards. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketV2Result{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketV2Result{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Always set list-type in ListObjects V2 + urlValues.Set("list-type", "2") + + if metadata { + urlValues.Set("metadata", "true") + } + + // Set this conditionally if asked + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + + // Always set encoding-type in ListObjects V2 + urlValues.Set("encoding-type", "url") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := ListBucketV2Result{} + if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { + return listBucketResult, err + } + + // This is an additional verification check to make + // sure proper responses are received. + if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { + return listBucketResult, ErrorResponse{ + Code: NotImplemented, + Message: "Truncated response should have continuation token set", + } + } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond) + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + // Success. + return listBucketResult, nil +} + +func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) iter.Seq[ObjectInfo] { + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + return func(yield func(ObjectInfo) bool) { + if contextCanceled(ctx) { + return + } + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + yield(ObjectInfo{Err: err}) + return + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + yield(ObjectInfo{Err: err}) + return + } + + marker := opts.StartAfter + for { + if contextCanceled(ctx) { + return + } + + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) + if err != nil { + yield(ObjectInfo{Err: err}) + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + object.ETag = trimEtag(object.ETag) + if !yield(object) { + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + if !yield(ObjectInfo{Key: obj.Prefix}) { + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + } +} + +func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) iter.Seq[ObjectInfo] { + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + return func(yield func(ObjectInfo) bool) { + if contextCanceled(ctx) { + return + } + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + yield(ObjectInfo{Err: err}) + return + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + yield(ObjectInfo{Err: err}) + return + } + + var ( + keyMarker = "" + versionIDMarker = "" + preName = "" + preKey = "" + perVersions []Version + numVersions int + ) + + send := func(vers []Version) bool { + if opts.WithVersions && opts.ReverseVersions { + slices.Reverse(vers) + numVersions = len(vers) + } + for _, version := range vers { + info := ObjectInfo{ + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified.Truncate(time.Millisecond), + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, + IsDeleteMarker: version.isDeleteMarker, + UserTags: version.UserTags, + UserMetadata: version.UserMetadata, + Internal: version.Internal, + NumVersions: numVersions, + ChecksumMode: version.ChecksumType, + ChecksumCRC32: version.ChecksumCRC32, + ChecksumCRC32C: version.ChecksumCRC32C, + ChecksumSHA1: version.ChecksumSHA1, + ChecksumSHA256: version.ChecksumSHA256, + ChecksumCRC64NVME: version.ChecksumCRC64NVME, + } + if !yield(info) { + return false + } + } + return true + } + for { + if contextCanceled(ctx) { + return + } + + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter) + if err != nil { + yield(ObjectInfo{Err: err}) + return + } + + if opts.WithVersions && opts.ReverseVersions { + for _, version := range result.Versions { + if preName == "" { + preName = result.Name + preKey = version.Key + } + if result.Name == preName && preKey == version.Key { + // If the current name is same as previous name, + // we need to append the version to the previous version. + perVersions = append(perVersions, version) + continue + } + // Send the file versions. + if !send(perVersions) { + return + } + perVersions = perVersions[:0] + perVersions = append(perVersions, version) + preName = result.Name + preKey = version.Key + } + } else { + if !send(result.Versions) { + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + if !yield(ObjectInfo{Key: obj.Prefix}) { + return + } + } + + // If next key marker is present, save it for next request. + if result.NextKeyMarker != "" { + keyMarker = result.NextKeyMarker + } + + // If next version id marker is present, save it for next request. + if result.NextVersionIDMarker != "" { + versionIDMarker = result.NextVersionIDMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + // sent the lasted file with versions + if opts.ReverseVersions && len(perVersions) > 0 { + if !send(perVersions) { + return + } + } + return + } + } + } +} + +// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects +// and their versions in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?key-marker - Specifies the key to start with when listing objects in a bucket. +// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListVersionsResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + return ListVersionsResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set versions to trigger versioning API + urlValues.Set("versions", "") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", opts.Prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + + // Set max keys. + if opts.MaxKeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys)) + } + + // Set version ID marker + if versionIDMarker != "" { + urlValues.Set("version-id-marker", versionIDMarker) + } + + if opts.WithMetadata { + urlValues.Set("metadata", "true") + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: opts.headers, + }) + defer closeResponse(resp) + if err != nil { + return ListVersionsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode ListVersionsResult XML. + listObjectVersionsOutput := ListVersionsResult{} + err = xmlDecoder(resp.Body, &listObjectVersionsOutput) + if err != nil { + return ListVersionsResult{}, err + } + + for i, obj := range listObjectVersionsOutput.Versions { + listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + for i, obj := range listObjectVersionsOutput.CommonPrefixes { + listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + if listObjectVersionsOutput.NextKeyMarker != "" { + listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + return listObjectVersionsOutput, nil +} + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?marker - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if objectMarker != "" { + urlValues.Set("marker", objectMarker) + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode listBuckets XML. + listBucketResult := ListBucketResult{} + err = xmlDecoder(resp.Body, &listBucketResult) + if err != nil { + return listBucketResult, err + } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond) + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + if listBucketResult.NextMarker != "" { + listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + return listBucketResult, nil +} + +// ListObjectsOptions holds all options of a list object request +type ListObjectsOptions struct { + // ReverseVersions - reverse the order of the object versions + ReverseVersions bool + // Include objects versions in the listing + WithVersions bool + // Include objects metadata in the listing + WithMetadata bool + // Only list objects with the prefix + Prefix string + // Ignore '/' delimiter + Recursive bool + // The maximum number of objects requested per + // batch, advanced use-case not useful for most + // applications + MaxKeys int + // StartAfter start listing lexically at this + // object onwards, this value can also be set + // for Marker when `UseV1` is set to true. + StartAfter string + + // Use the deprecated list objects V1 API + UseV1 bool + + headers http.Header +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *ListObjectsOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(http.Header) + } + o.headers.Set(key, value) +} + +// ListObjects returns objects list after evaluating the passed options. +// +// api := client.New(....) +// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { +// fmt.Println(object) +// } +// +// If caller cancels the context, then the last entry on the 'chan ObjectInfo' will be the context.Error() +// caller must drain the channel entirely and wait until channel is closed before proceeding, without +// waiting on the channel to be closed completely you might leak goroutines. +func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + objectStatCh := make(chan ObjectInfo, 1) + go func() { + defer close(objectStatCh) + if contextCanceled(ctx) { + objectStatCh <- ObjectInfo{Err: ctx.Err()} + return + } + + var objIter iter.Seq[ObjectInfo] + switch { + case opts.WithVersions: + objIter = c.listObjectVersions(ctx, bucketName, opts) + case opts.UseV1: + objIter = c.listObjects(ctx, bucketName, opts) + default: + location, _ := c.bucketLocCache.Get(bucketName) + if location == "snowball" { + objIter = c.listObjects(ctx, bucketName, opts) + } else { + objIter = c.listObjectsV2(ctx, bucketName, opts) + } + } + for obj := range objIter { + select { + case <-ctx.Done(): + objectStatCh <- ObjectInfo{Err: ctx.Err()} + return + case objectStatCh <- obj: + } + } + }() + return objectStatCh +} + +// ListObjectsIter returns object list as a iterator sequence. +// caller must cancel the context if they are not interested in +// iterating further, if no more entries the iterator will +// automatically stop. +// +// api := client.New(....) +// for object := range api.ListObjectsIter(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { +// if object.Err != nil { +// // handle the errors. +// } +// fmt.Println(object) +// } +// +// Canceling the context the iterator will stop, if you wish to discard the yielding make sure +// to cancel the passed context without that you might leak coroutines +func (c *Client) ListObjectsIter(ctx context.Context, bucketName string, opts ListObjectsOptions) iter.Seq[ObjectInfo] { + if opts.WithVersions { + return c.listObjectVersions(ctx, bucketName, opts) + } + + // Use legacy list objects v1 API + if opts.UseV1 { + return c.listObjects(ctx, bucketName, opts) + } + + // Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1. + if location, ok := c.bucketLocCache.Get(bucketName); ok { + if location == "snowball" { + return c.listObjects(ctx, bucketName, opts) + } + } + + return c.listObjectsV2(ctx, bucketName, opts) +} + +// ListIncompleteUploads - List incompletely uploaded multipart objects. +// +// ListIncompleteUploads lists all incompleted objects matching the +// objectPrefix from the specified bucket. If recursion is enabled +// it would list all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive. +// If you enable recursive as 'true' this function will return back all +// the multipart objects in a given bucket name. +// +// api := client.New(....) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { +// fmt.Println(message) +// } +func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { + return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) +} + +// contextCanceled returns whether a context is canceled. +func contextCanceled(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// listIncompleteUploads lists all incomplete uploads. +func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { + // Allocate channel for multipart uploads. + objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) + // Delimiter is set to "/" by default. + delimiter := "/" + if recursive { + // If recursive do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return objectMultipartStatCh + } + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + defer close(objectMultipartStatCh) + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return objectMultipartStatCh + } + go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { + defer func() { + if contextCanceled(ctx) { + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: ctx.Err(), + } + } + close(objectMultipartStatCh) + }() + + // object and upload ID marker for future requests. + var objectMarker string + var uploadIDMarker string + for { + // list all multipart uploads. + result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0) + if err != nil { + objectMultipartStatCh <- ObjectMultipartInfo{ + Err: err, + } + return + } + objectMarker = result.NextKeyMarker + uploadIDMarker = result.NextUploadIDMarker + + // Send all multipart uploads. + for _, obj := range result.Uploads { + // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. + select { + // Send individual uploads here. + case objectMultipartStatCh <- obj: + // If the context is canceled + case <-ctx.Done(): + return + } + } + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send delimited prefixes here. + case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}: + // If context is canceled. + case <-ctx.Done(): + return + } + } + // Listing ends if result not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectMultipartStatCh) + // return. + return objectMultipartStatCh +} + +// listMultipartUploadsQuery - (List Multipart Uploads). +// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. +// request parameters. :- +// --------- +// ?key-marker - Specifies the multipart upload after which listing should begin. +// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. +func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set uploads. + urlValues.Set("uploads", "") + // Set object key marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + // Set upload id marker. + if uploadIDMarker != "" { + urlValues.Set("upload-id-marker", uploadIDMarker) + } + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // maxUploads should be 1000 or less. + if maxUploads > 0 { + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) + } + + // Execute GET on bucketName to list multipart uploads. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListMultipartUploadsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode response body. + listMultipartUploadsResult := ListMultipartUploadsResult{} + err = xmlDecoder(resp.Body, &listMultipartUploadsResult) + if err != nil { + return listMultipartUploadsResult, err + } + + listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + for i, obj := range listMultipartUploadsResult.Uploads { + listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + + for i, obj := range listMultipartUploadsResult.CommonPrefixes { + listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + + return listMultipartUploadsResult, nil +} + +// listObjectParts list all object parts recursively. +// +//lint:ignore U1000 Keep this around +func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { + // Part number marker for the next batch of request. + var nextPartNumberMarker int + partsInfo = make(map[int]ObjectPart) + for { + // Get list of uploaded parts a maximum of 1000 per request. + listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000) + if err != nil { + return nil, err + } + // Append to parts info. + for _, part := range listObjPartsResult.ObjectParts { + // Trim off the odd double quotes from ETag in the beginning and end. + part.ETag = trimEtag(part.ETag) + partsInfo[part.PartNumber] = part + } + // Keep part number marker, for the next iteration. + nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker + // Listing ends result is not truncated, return right here. + if !listObjPartsResult.IsTruncated { + break + } + } + + // Return all the parts. + return partsInfo, nil +} + +// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. +func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { + var uploadIDs []string + // Make list incomplete uploads recursive. + isRecursive := true + // List all incomplete uploads. + for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) { + if mpUpload.Err != nil { + return nil, mpUpload.Err + } + if objectName == mpUpload.Key { + uploadIDs = append(uploadIDs, mpUpload.UploadID) + } + } + // Return the latest upload id. + return uploadIDs, nil +} + +// listObjectPartsQuery (List Parts query) +// - lists some or all (up to 1000) parts that have been uploaded +// for a specific multipart upload +// +// You can use the request parameters as selection criteria to return +// a subset of the uploads in a bucket, request parameters :- +// --------- +// ?part-number-marker - Specifies the part after which listing should +// begin. +// ?max-parts - Maximum parts to be listed per request. +func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number marker. + urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // maxParts should be 1000 or less. + if maxParts > 0 { + // Set max parts. + urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) + } + + // Execute GET on objectName to get list of parts. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListObjectPartsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode list object parts XML. + listObjectPartsResult := ListObjectPartsResult{} + err = xmlDecoder(resp.Body, &listObjectPartsResult) + if err != nil { + return listObjectPartsResult, err + } + return listObjectPartsResult, nil +} + +// Decode an S3 object name according to the encoding type +func decodeS3Name(name, encodingType string) (string, error) { + switch encodingType { + case "url": + return url.QueryUnescape(name) + default: + return name, nil + } +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go new file mode 100644 index 000000000000..5ad9a494371d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go @@ -0,0 +1,193 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectLegalHold - object legal hold specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html +type objectLegalHold struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"LegalHold"` + Status LegalHoldStatus `xml:"Status,omitempty"` +} + +// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call +type PutObjectLegalHoldOptions struct { + VersionID string + Status *LegalHoldStatus +} + +// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call +type GetObjectLegalHoldOptions struct { + VersionID string +} + +// LegalHoldStatus - object legal hold status. +type LegalHoldStatus string + +const ( + // LegalHoldEnabled indicates legal hold is enabled + LegalHoldEnabled LegalHoldStatus = "ON" + + // LegalHoldDisabled indicates legal hold is disabled + LegalHoldDisabled LegalHoldStatus = "OFF" +) + +func (r LegalHoldStatus) String() string { + return string(r) +} + +// IsValid - check whether this legal hold status is valid or not. +func (r LegalHoldStatus) IsValid() bool { + return r == LegalHoldEnabled || r == LegalHoldDisabled +} + +func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { + if status == nil { + return nil, fmt.Errorf("Status not set") + } + if !status.IsValid() { + return nil, fmt.Errorf("invalid legal hold status `%v`", status) + } + legalHold := &objectLegalHold{ + Status: *status, + } + return legalHold, nil +} + +// PutObjectLegalHold sets the legal hold status for an object and specific version. +// Legal hold prevents an object version from being overwritten or deleted, regardless of retention settings. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including Status (LegalHoldEnabled or LegalHoldDisabled) and optional VersionID +// +// Returns an error if the operation fails or if the status is invalid. +func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + lh, err := newObjectLegalHold(opts.Status) + if err != nil { + return err + } + + lhData, err := xml.Marshal(lh) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(lhData), + contentLength: int64(len(lhData)), + contentMD5Base64: sumMD5Base64(lhData), + contentSHA256Hex: sum256Hex(lhData), + } + + // Execute PUT Object Legal Hold. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectLegalHold retrieves the legal hold status for an object and specific version. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including optional VersionID to target a specific version +// +// Returns the legal hold status (LegalHoldEnabled or LegalHoldDisabled) or an error if the operation fails. +func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + lh := &objectLegalHold{} + if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil { + return nil, err + } + + return &lh.Status, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go new file mode 100644 index 000000000000..f0a439853f9c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go @@ -0,0 +1,241 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RetentionMode - object retention mode. +type RetentionMode string + +const ( + // Governance - governance mode. + Governance RetentionMode = "GOVERNANCE" + + // Compliance - compliance mode. + Compliance RetentionMode = "COMPLIANCE" +) + +func (r RetentionMode) String() string { + return string(r) +} + +// IsValid - check whether this retention mode is valid or not. +func (r RetentionMode) IsValid() bool { + return r == Governance || r == Compliance +} + +// ValidityUnit - retention validity unit. +type ValidityUnit string + +const ( + // Days - denotes no. of days. + Days ValidityUnit = "DAYS" + + // Years - denotes no. of years. + Years ValidityUnit = "YEARS" +) + +func (unit ValidityUnit) String() string { + return string(unit) +} + +// IsValid - check whether this validity unit is valid or not. +func (unit ValidityUnit) isValid() bool { + return unit == Days || unit == Years +} + +// Retention - bucket level retention configuration. +type Retention struct { + Mode RetentionMode + Validity time.Duration +} + +func (r Retention) String() string { + return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity) +} + +// IsEmpty - returns whether retention is empty or not. +func (r Retention) IsEmpty() bool { + return r.Mode == "" || r.Validity == 0 +} + +// objectLockConfig - object lock configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html +type objectLockConfig struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"ObjectLockConfiguration"` + ObjectLockEnabled string `xml:"ObjectLockEnabled"` + Rule *struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } `xml:"DefaultRetention"` + } `xml:"Rule,omitempty"` +} + +func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { + config := &objectLockConfig{ + ObjectLockEnabled: "Enabled", + } + + if mode != nil && validity != nil && unit != nil { + if !mode.IsValid() { + return nil, fmt.Errorf("invalid retention mode `%v`", mode) + } + + if !unit.isValid() { + return nil, fmt.Errorf("invalid validity unit `%v`", unit) + } + + config.Rule = &struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } `xml:"DefaultRetention"` + }{} + + config.Rule.DefaultRetention.Mode = *mode + if *unit == Days { + config.Rule.DefaultRetention.Days = validity + } else { + config.Rule.DefaultRetention.Years = validity + } + + return config, nil + } + + if mode == nil && validity == nil && unit == nil { + return config, nil + } + + return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") +} + +// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("object-lock", "") + + config, err := newObjectLockConfig(mode, validity, unit) + if err != nil { + return err + } + + configData, err := xml.Marshal(config) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(configData), + contentLength: int64(len(configData)), + contentMD5Base64: sumMD5Base64(configData), + contentSHA256Hex: sum256Hex(configData), + } + + // Execute PUT bucket object lock configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// GetObjectLockConfig gets object lock configuration of given bucket. +func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", nil, nil, nil, err + } + + urlValues := make(url.Values) + urlValues.Set("object-lock", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return "", nil, nil, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + config := &objectLockConfig{} + if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { + return "", nil, nil, nil, err + } + + if config.Rule != nil { + mode = &config.Rule.DefaultRetention.Mode + if config.Rule.DefaultRetention.Days != nil { + validity = config.Rule.DefaultRetention.Days + days := Days + unit = &days + } else { + validity = config.Rule.DefaultRetention.Years + years := Years + unit = &years + } + return config.ObjectLockEnabled, mode, validity, unit, nil + } + return config.ObjectLockEnabled, nil, nil, nil, nil +} + +// GetBucketObjectLockConfig gets object lock configuration of given bucket. +func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { + _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) + return mode, validity, unit, err +} + +// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go new file mode 100644 index 000000000000..2efb5d89a6ae --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go @@ -0,0 +1,182 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectRetention - object retention specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html +type objectRetention struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"Retention"` + Mode RetentionMode `xml:"Mode,omitempty"` + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"` +} + +func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) { + objectRetention := &objectRetention{} + + if date != nil && !date.IsZero() { + objectRetention.RetainUntilDate = date + } + if mode != nil { + if !mode.IsValid() { + return nil, fmt.Errorf("invalid retention mode `%v`", mode) + } + objectRetention.Mode = *mode + } + + return objectRetention, nil +} + +// PutObjectRetentionOptions represents options specified by user for PutObject call +type PutObjectRetentionOptions struct { + GovernanceBypass bool + Mode *RetentionMode + RetainUntilDate *time.Time + VersionID string +} + +// PutObjectRetention sets the retention configuration for an object and specific version. +// Object retention prevents an object version from being deleted or overwritten for a specified period. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including Mode (GOVERNANCE or COMPLIANCE), RetainUntilDate, optional VersionID, and GovernanceBypass +// +// Returns an error if the operation fails or if the retention settings are invalid. +func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("retention", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate) + if err != nil { + return err + } + + retentionData, err := xml.Marshal(retention) + if err != nil { + return err + } + + // Build headers. + headers := make(http.Header) + + if opts.GovernanceBypass { + // Set the bypass goverenance retention header + headers.Set(amzBypassGovernance, "true") + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(retentionData), + contentLength: int64(len(retentionData)), + contentMD5Base64: sumMD5Base64(retentionData), + contentSHA256Hex: sum256Hex(retentionData), + customHeader: headers, + } + + // Execute PUT Object Retention. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectRetention retrieves the retention configuration for an object and specific version. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - versionID: Optional version ID to target a specific version (empty string for current version) +// +// Returns the retention mode (GOVERNANCE or COMPLIANCE), retain-until date, and any error. +func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, nil, err + } + urlValues := make(url.Values) + urlValues.Set("retention", "") + if versionID != "" { + urlValues.Set("versionId", versionID) + } + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + retention := &objectRetention{} + if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil { + return nil, nil, err + } + + return &retention.Mode, retention.RetainUntilDate, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go new file mode 100644 index 000000000000..66d13110678a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go @@ -0,0 +1,202 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// PutObjectTaggingOptions holds an object version id +// to update tag(s) of a specific object version +type PutObjectTaggingOptions struct { + VersionID string + Internal AdvancedObjectTaggingOptions +} + +// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use. +type AdvancedObjectTaggingOptions struct { + ReplicationProxyRequest string +} + +// PutObjectTagging replaces or creates object tag(s) and can target a specific object version +// in a versioned bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - otags: Tags to apply to the object +// - opts: Options including VersionID to target a specific version +// +// Returns an error if the operation fails. +func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + headers := make(http.Header, 0) + if opts.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) + } + reqBytes, err := xml.Marshal(otags) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(reqBytes), + contentLength: int64(len(reqBytes)), + contentMD5Base64: sumMD5Base64(reqBytes), + customHeader: headers, + } + + // Execute PUT to set a object tagging. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectTaggingOptions holds the object version ID +// to fetch the tagging key/value pairs +type GetObjectTaggingOptions struct { + VersionID string + Internal AdvancedObjectTaggingOptions +} + +// GetObjectTagging retrieves object tag(s) with options to target a specific object version +// in a versioned bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including VersionID to target a specific version +// +// Returns the object's tags or an error if the operation fails. +func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + headers := make(http.Header, 0) + if opts.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) + } + // Execute GET on object to get object tag(s) + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: headers, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return tags.ParseObjectXML(resp.Body) +} + +// RemoveObjectTaggingOptions holds the version id of the object to remove +type RemoveObjectTaggingOptions struct { + VersionID string + Internal AdvancedObjectTaggingOptions +} + +// RemoveObjectTagging removes object tag(s) with options to target a specific object version +// in a versioned bucket. +// +// Parameters: +// - ctx: Context for request cancellation and timeout +// - bucketName: Name of the bucket +// - objectName: Name of the object +// - opts: Options including VersionID to target a specific version +// +// Returns an error if the operation fails. +func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + headers := make(http.Header, 0) + if opts.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) + } + // Execute DELETE on object to remove object tag(s) + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: headers, + }) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + // S3 returns "204 No content" after Object tag deletion. + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go new file mode 100644 index 000000000000..29642200ee14 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go @@ -0,0 +1,228 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" +) + +// presignURL - Returns a presigned URL for an input 'method'. +// Expires maximum is 7days - ie. 604800 and minimum is 1. +func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { + // Input validation. + if method == "" { + return nil, errInvalidArgument("method cannot be empty.") + } + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err = isValidExpiry(expires); err != nil { + return nil, err + } + + // Convert expires into seconds. + expireSeconds := int64(expires / time.Second) + reqMetadata := requestMetadata{ + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + queryValues: reqParams, + extraPresignHeader: extraHeaders, + } + + // Instantiate a new request. + // Since expires is set newRequest will presign the request. + var req *http.Request + if req, err = c.newRequest(ctx, method, reqMetadata); err != nil { + return nil, err + } + return req.URL, nil +} + +// PresignedGetObject - Returns a presigned URL to access an object +// data without credentials. URL can have a maximum expiry of +// upto 7days or a minimum of 1sec. Additionally you can override +// a set of response headers using the query parameters. +func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil) +} + +// PresignedHeadObject - Returns a presigned URL to access +// object metadata without credentials. URL can have a maximum expiry +// of upto 7days or a minimum of 1sec. Additionally you can override +// a set of response headers using the query parameters. +func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil) +} + +// PresignedPutObject - Returns a presigned URL to upload an object +// without credentials. URL can have a maximum expiry of upto 7days +// or a minimum of 1sec. +func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil) +} + +// PresignHeader - similar to Presign() but allows including HTTP headers that +// will be used to build the signature. The request using the resulting URL will +// need to have the exact same headers to be added for signature validation to +// pass. +// +// FIXME: The extra header parameter should be included in Presign() in the next +// major version bump, and this function should then be deprecated. +func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) { + return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders) +} + +// Presign - returns a presigned URL for any http method of your choice along +// with custom request params and extra signed headers. URL can have a maximum +// expiry of upto 7days or a minimum of 1sec. +func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil) +} + +// PresignedPostPolicy - Returns POST urlString, form data to upload an object. +func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { + // Validate input arguments. + if p.expiration.IsZero() { + return nil, nil, errors.New("Expiration time must be specified") + } + if _, ok := p.formData["key"]; !ok { + return nil, nil, errors.New("object key must be specified") + } + if _, ok := p.formData["bucket"]; !ok { + return nil, nil, errors.New("bucket name must be specified") + } + + bucketName := p.formData["bucket"] + // Fetch the bucket location. + location, err := c.getBucketLocation(ctx, bucketName) + if err != nil { + return nil, nil, err + } + + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) + + u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) + if err != nil { + return nil, nil, err + } + + // Get credentials from the configured credentials provider. + credValues, err := c.credsProvider.GetWithContext(c.CredContext()) + if err != nil { + return nil, nil, err + } + + var ( + signerType = credValues.SignerType + sessionToken = credValues.SessionToken + accessKeyID = credValues.AccessKeyID + secretAccessKey = credValues.SecretAccessKey + ) + + if signerType.IsAnonymous() { + return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials") + } + + // Keep time. + t := time.Now().UTC() + // For signature version '2' handle here. + if signerType.IsV2() { + policyBase64 := p.base64() + p.formData["policy"] = policyBase64 + // For Google endpoint set this value to be 'GoogleAccessId'. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + p.formData["GoogleAccessId"] = accessKeyID + } else { + // For all other endpoints set this value to be 'AWSAccessKeyId'. + p.formData["AWSAccessKeyId"] = accessKeyID + } + // Sign the policy. + p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey) + return u, p.formData, nil + } + + // Add date policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-date", + value: t.Format(iso8601DateFormat), + }); err != nil { + return nil, nil, err + } + + // Add algorithm policy. + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-algorithm", + value: signV4Algorithm, + }); err != nil { + return nil, nil, err + } + + // Add a credential policy. + credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3) + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-credential", + value: credential, + }); err != nil { + return nil, nil, err + } + + if sessionToken != "" { + if err = p.addNewPolicy(policyCondition{ + matchType: "eq", + condition: "$x-amz-security-token", + value: sessionToken, + }); err != nil { + return nil, nil, err + } + } + + // Get base64 encoded policy. + policyBase64 := p.base64() + + // Fill in the form data. + p.formData["policy"] = policyBase64 + p.formData["x-amz-algorithm"] = signV4Algorithm + p.formData["x-amz-credential"] = credential + p.formData["x-amz-date"] = t.Format(iso8601DateFormat) + if sessionToken != "" { + p.formData["x-amz-security-token"] = sessionToken + } + p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) + return u, p.formData, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go new file mode 100644 index 000000000000..55c038ae0282 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go @@ -0,0 +1,78 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// PromptObject performs language model inference with the prompt and referenced object as context. +// Inference is performed using a Lambda handler that can process the prompt and object. +// Currently, this functionality is limited to certain MinIO servers. +func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, prompt string, opts PromptObjectOptions) (io.ReadCloser, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: InvalidBucketName, + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: XMinioInvalidObjectName, + Message: err.Error(), + } + } + + opts.AddLambdaArnToReqParams(opts.LambdaArn) + opts.SetHeader("Content-Type", "application/json") + opts.AddPromptArg("prompt", prompt) + promptReqBytes, err := json.Marshal(opts.PromptArgs) + if err != nil { + return nil, err + } + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: opts.toQueryValues(), + customHeader: opts.Header(), + contentSHA256Hex: sum256Hex(promptReqBytes), + contentBody: bytes.NewReader(promptReqBytes), + contentLength: int64(len(promptReqBytes)), + }) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + defer closeResponse(resp) + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + return resp.Body, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-options.go b/vendor/github.com/minio/minio-go/v7/api-prompt-options.go new file mode 100644 index 000000000000..4493a75d4c77 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-prompt-options.go @@ -0,0 +1,84 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "net/url" +) + +// PromptObjectOptions provides options to PromptObject call. +// LambdaArn is the ARN of the Prompt Lambda to be invoked. +// PromptArgs is a map of key-value pairs to be passed to the inference action on the Prompt Lambda. +// "prompt" is a reserved key and should not be used as a key in PromptArgs. +type PromptObjectOptions struct { + LambdaArn string + PromptArgs map[string]any + headers map[string]string + reqParams url.Values +} + +// Header returns the http.Header representation of the POST options. +func (o PromptObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + return headers +} + +// AddPromptArg Add a key value pair to the prompt arguments where the key is a string and +// the value is a JSON serializable. +func (o *PromptObjectOptions) AddPromptArg(key string, value any) { + if o.PromptArgs == nil { + o.PromptArgs = make(map[string]any) + } + o.PromptArgs[key] = value +} + +// AddLambdaArnToReqParams adds the lambdaArn to the request query string parameters. +func (o *PromptObjectOptions) AddLambdaArnToReqParams(lambdaArn string) { + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Add("lambdaArn", lambdaArn) +} + +// SetHeader adds a key value pair to the options. The +// key-value pair will be part of the HTTP POST request +// headers. +func (o *PromptObjectOptions) SetHeader(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// toQueryValues - Convert the reqParams in Options to query string parameters. +func (o *PromptObjectOptions) toQueryValues() url.Values { + urlValues := make(url.Values) + if o.reqParams != nil { + for key, values := range o.reqParams { + for _, value := range values { + urlValues.Add(key, value) + } + } + } + + return urlValues +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go new file mode 100644 index 000000000000..47d8419e6f24 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go @@ -0,0 +1,130 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Bucket operations +func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + // Validate the input arguments. + if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { + return err + } + + err = c.doMakeBucket(ctx, bucketName, opts) + if err != nil && (opts.Region == "" || opts.Region == "us-east-1") { + if resp, ok := err.(ErrorResponse); ok && resp.Code == AuthorizationHeaderMalformed && resp.Region != "" { + opts.Region = resp.Region + err = c.doMakeBucket(ctx, bucketName, opts) + } + } + return err +} + +func (c *Client) doMakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + defer func() { + // Save the location into cache on a successful makeBucket response. + if err == nil { + c.bucketLocCache.Set(bucketName, opts.Region) + } + }() + + // If location is empty, treat is a default region 'us-east-1'. + if opts.Region == "" { + opts.Region = "us-east-1" + // For custom region clients, default + // to custom region instead not 'us-east-1'. + if c.region != "" { + opts.Region = c.region + } + } + // PUT bucket request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + bucketLocation: opts.Region, + } + + headers := make(http.Header) + if opts.ObjectLocking { + headers.Add("x-amz-bucket-object-lock-enabled", "true") + } + if opts.ForceCreate { + headers.Add("x-minio-force-create", "true") + } + reqMetadata.customHeader = headers + + // If location is not 'us-east-1' create bucket location config. + if opts.Region != "us-east-1" && opts.Region != "" { + createBucketConfig := createBucketConfiguration{} + createBucketConfig.Location = opts.Region + var createBucketConfigBytes []byte + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + if err != nil { + return err + } + reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) + reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) + reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) + reqMetadata.contentLength = int64(len(createBucketConfigBytes)) + } + + // Execute PUT to create a new bucket. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Success. + return nil +} + +// MakeBucketOptions holds all options to tweak bucket creation +type MakeBucketOptions struct { + // Bucket location + Region string + // Enable object locking + ObjectLocking bool + + // ForceCreate - this is a MinIO specific extension. + ForceCreate bool +} + +// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts. +// +// Location is an optional argument, by default all buckets are +// created in US Standard Region. +// +// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html +// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations +func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + return c.makeBucket(ctx, bucketName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go new file mode 100644 index 000000000000..52f69563ca47 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -0,0 +1,149 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "math" + "os" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +const nullVersionID = "null" + +// Verify if reader is *minio.Object +func isObject(reader io.Reader) (ok bool) { + _, ok = reader.(*Object) + return ok +} + +// Verify if reader is a generic ReaderAt +func isReadAt(reader io.Reader) (ok bool) { + var v *os.File + v, ok = reader.(*os.File) + if ok { + // Stdin, Stdout and Stderr all have *os.File type + // which happen to also be io.ReaderAt compatible + // we need to add special conditions for them to + // be ignored by this function. + for _, f := range []string{ + "/dev/stdin", + "/dev/stdout", + "/dev/stderr", + } { + if f == v.Name() { + ok = false + break + } + } + } else { + _, ok = reader.(io.ReaderAt) + } + return ok +} + +// OptimalPartInfo - calculate the optimal part info for a given +// object size. +// +// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible +// object storage it will have the following parameters as constants. +// +// maxPartsCount - 10000 +// minPartSize - 16MiB +// maxMultipartPutObjectSize - 5TiB +func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) { + // object size is '-1' set it to 5TiB. + var unknownSize bool + if objectSize == -1 { + unknownSize = true + objectSize = maxMultipartPutObjectSize + } + + // object size is larger than supported maximum. + if objectSize > maxMultipartPutObjectSize { + err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") + return totalPartsCount, partSize, lastPartSize, err + } + + var partSizeFlt float64 + if configuredPartSize > 0 { + if int64(configuredPartSize) > objectSize { + err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "") + return totalPartsCount, partSize, lastPartSize, err + } + + if !unknownSize { + if objectSize > (int64(configuredPartSize) * maxPartsCount) { + err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.") + return totalPartsCount, partSize, lastPartSize, err + } + } + + if configuredPartSize < absMinPartSize { + err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") + return totalPartsCount, partSize, lastPartSize, err + } + + if configuredPartSize > maxPartSize { + err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") + return totalPartsCount, partSize, lastPartSize, err + } + + partSizeFlt = float64(configuredPartSize) + if unknownSize { + // If input has unknown size and part size is configured + // keep it to maximum allowed as per 10000 parts. + objectSize = int64(configuredPartSize) * maxPartsCount + } + } else { + configuredPartSize = minPartSize + // Use floats for part size for all calculations to avoid + // overflows during float64 to int64 conversions. + partSizeFlt = float64(objectSize / maxPartsCount) + partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) + } + + // Total parts count. + totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) + // Part size. + partSize = int64(partSizeFlt) + // Last part size. + lastPartSize = objectSize - int64(totalPartsCount-1)*partSize + return totalPartsCount, partSize, lastPartSize, nil +} + +// getUploadID - fetch upload id if already present for an object name +// or initiate a new request to fetch a new upload id. +func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return "", err + } + + // Initiate multipart upload for an object. + initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) + if err != nil { + return "", err + } + return initMultipartUploadResult.UploadID, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go new file mode 100644 index 000000000000..3023b949cd44 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go @@ -0,0 +1,167 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2023 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/json" + "errors" + "io" + "mime/multipart" + "net/http" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" +) + +// PutObjectFanOutEntry is per object entry fan-out metadata +type PutObjectFanOutEntry struct { + Key string `json:"key"` + UserMetadata map[string]string `json:"metadata,omitempty"` + UserTags map[string]string `json:"tags,omitempty"` + ContentType string `json:"contentType,omitempty"` + ContentEncoding string `json:"contentEncoding,omitempty"` + ContentDisposition string `json:"contentDisposition,omitempty"` + ContentLanguage string `json:"contentLanguage,omitempty"` + CacheControl string `json:"cacheControl,omitempty"` + Retention RetentionMode `json:"retention,omitempty"` + RetainUntilDate *time.Time `json:"retainUntil,omitempty"` +} + +// PutObjectFanOutRequest this is the request structure sent +// to the server to fan-out the stream to multiple objects. +type PutObjectFanOutRequest struct { + Entries []PutObjectFanOutEntry + Checksum Checksum + SSE encrypt.ServerSide +} + +// PutObjectFanOutResponse this is the response structure sent +// by the server upon success or failure for each object +// fan-out keys. Additionally, this response carries ETag, +// VersionID and LastModified for each object fan-out. +type PutObjectFanOutResponse struct { + Key string `json:"key"` + ETag string `json:"etag,omitempty"` + VersionID string `json:"versionId,omitempty"` + LastModified *time.Time `json:"lastModified,omitempty"` + Error string `json:"error,omitempty"` +} + +// PutObjectFanOut - is a variant of PutObject instead of writing a single object from a single +// stream multiple objects are written, defined via a list of PutObjectFanOutRequests. Each entry +// in PutObjectFanOutRequest carries an object keyname and its relevant metadata if any. `Key` is +// mandatory, rest of the other options in PutObjectFanOutRequest are optional. +func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) { + if len(fanOutReq.Entries) == 0 { + return nil, errInvalidArgument("fan out requests cannot be empty") + } + + policy := NewPostPolicy() + policy.SetBucket(bucket) + policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16)) + + // Expires in 15 minutes. + policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)) + + // Set encryption headers if any. + policy.SetEncryption(fanOutReq.SSE) + + // Set checksum headers if any. + err := policy.SetChecksum(fanOutReq.Checksum) + if err != nil { + return nil, err + } + + url, formData, err := c.PresignedPostPolicy(ctx, policy) + if err != nil { + return nil, err + } + + r, w := io.Pipe() + + req, err := http.NewRequest(http.MethodPost, url.String(), r) + if err != nil { + w.Close() + return nil, err + } + + var b strings.Builder + enc := json.NewEncoder(&b) + for _, req := range fanOutReq.Entries { + if req.Key == "" { + w.Close() + return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty") + } + if err = enc.Encode(&req); err != nil { + w.Close() + return nil, err + } + } + + mwriter := multipart.NewWriter(w) + req.Header.Add("Content-Type", mwriter.FormDataContentType()) + + go func() { + defer w.Close() + defer mwriter.Close() + + for k, v := range formData { + if err := mwriter.WriteField(k, v); err != nil { + return + } + } + + if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil { + return + } + + mw, err := mwriter.CreateFormFile("file", "fanout-content") + if err != nil { + return + } + + if _, err = io.Copy(mw, fanOutData); err != nil { + return + } + }() + + resp, err := c.do(req) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "fanout-content") + } + + dec := json.NewDecoder(resp.Body) + fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries)) + for dec.More() { + var m PutObjectFanOutResponse + if err = dec.Decode(&m); err != nil { + return nil, err + } + fanOutResp = append(fanOutResp, m) + } + + return fanOutResp, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go new file mode 100644 index 000000000000..4d29dfc18a44 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go @@ -0,0 +1,64 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "mime" + "os" + "path/filepath" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. +func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return UploadInfo{}, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return UploadInfo{}, err + } + + // Save the file size. + fileSize := fileStat.Size() + + // Set contentType based on filepath extension if not given or default + // value of "application/octet-stream" if the extension has no associated type. + if opts.ContentType == "" { + if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { + opts.ContentType = "application/octet-stream" + } + } + return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go new file mode 100644 index 000000000000..6a3e9f092242 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -0,0 +1,463 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions, +) (info UploadInfo, err error) { + info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + } + return info, err +} + +func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Choose hash algorithms to be calculated by hashCopyN, + // avoid sha256 with non-v4 signature request or + // HTTPS connection. + hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + customHeader := make(http.Header) + crc := opts.AutoChecksum.Hasher() + for partNumber <= totalPartsCount { + length, rErr := readFull(reader, buf) + if rErr == io.EOF && partNumber > 1 { + break + } + + if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { + return UploadInfo{}, rErr + } + + // Calculates hash sums while copying partSize bytes into cw. + for k, v := range hashAlgos { + v.Write(buf[:length]) + hashSums[k] = v.Sum(nil) + v.Close() + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Checksums.. + var ( + md5Base64 string + sha256Hex string + ) + + if hashSums["md5"] != nil { + md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) + } + if hashSums["sha256"] != nil { + sha256Hex = hex.EncodeToString(hashSums["sha256"]) + } + if opts.AutoChecksum.IsSet() { + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } + } + + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + // Proceed to upload the part. + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rErr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + allParts := make([]ObjectPart, 0, len(partsInfo)) + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + allParts = append(allParts, part) + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + applyAutoChecksum(&opts, allParts) + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + if opts.Internal.SourceVersionID != "" { + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) + } + } + urlValues.Set("versionId", opts.Internal.SourceVersionID) + } + + // Set ContentType header. + customHeader := opts.Header() + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Execute POST on an objectName to initiate multipart upload. + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +type uploadPartParams struct { + bucketName string + objectName string + uploadID string + reader io.Reader + partNumber int + md5Base64 string + sha256Hex string + size int64 + sse encrypt.ServerSide + streamSha256 bool + customHeader http.Header + trailer http.Header +} + +// uploadPart - Uploads a part in a multipart upload. +func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { + return ObjectPart{}, err + } + if err := s3utils.CheckValidObjectName(p.objectName); err != nil { + return ObjectPart{}, err + } + if p.size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) + } + if p.size <= -1 { + return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) + } + if p.partNumber <= 0 { + return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") + } + if p.uploadID == "" { + return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) + // Set upload id. + urlValues.Set("uploadId", p.uploadID) + + // Set encryption headers, if any. + if p.customHeader == nil { + p.customHeader = make(http.Header) + } + // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html + // Server-side encryption is supported by the S3 Multipart Upload actions. + // Unless you are using a customer-provided encryption key, you don't need + // to specify the encryption parameters in each UploadPart request. + if p.sse != nil && p.sse.Type() == encrypt.SSEC { + p.sse.Marshal(p.customHeader) + } + + reqMetadata := requestMetadata{ + bucketName: p.bucketName, + objectName: p.objectName, + queryValues: urlValues, + customHeader: p.customHeader, + contentBody: p.reader, + contentLength: p.size, + contentMD5Base64: p.md5Base64, + contentSHA256Hex: p.sha256Hex, + streamSha256: p.streamSha256, + trailer: p.trailer, + } + + // Execute PUT on each part. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) + } + } + // Once successfully uploaded, return completed part. + h := resp.Header + objPart := ObjectPart{ + ChecksumCRC32: h.Get(ChecksumCRC32.Key()), + ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()), + ChecksumSHA1: h.Get(ChecksumSHA1.Key()), + ChecksumSHA256: h.Get(ChecksumSHA256.Key()), + ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()), + } + objPart.Size = p.size + objPart.PartNumber = p.partNumber + // Trim off the odd double quotes from ETag in the beginning and end. + objPart.ETag = trimEtag(h.Get("ETag")) + return objPart, nil +} + +// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. +func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, + complete completeMultipartUpload, opts PutObjectOptions, +) (UploadInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + // Marshal complete multipart body. + completeMultipartUploadBytes, err := xml.Marshal(complete) + if err != nil { + return UploadInfo{}, err + } + + headers := opts.Header() + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload + headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload + headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload + } + + // Instantiate all the complete multipart buffer. + completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: completeMultipartUploadBuffer, + contentLength: int64(len(completeMultipartUploadBytes)), + contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), + customHeader: headers, + expect200OKWithError: true, + } + + // Execute POST to complete multipart upload for an objectName. + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) + defer closeResponse(resp) + if err != nil { + return UploadInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Read resp.Body into a []bytes to parse for Error response inside the body + var b []byte + b, err = io.ReadAll(resp.Body) + if err != nil { + return UploadInfo{}, err + } + // Decode completed multipart upload response on success. + completeMultipartUploadResult := completeMultipartUploadResult{} + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) + if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment + return UploadInfo{}, err + } else if completeMultipartUploadResult.Bucket == "" { + // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. + // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values + // of the members. + + // Decode completed multipart upload response on failure + completeMultipartUploadErr := ErrorResponse{} + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) + if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment + return UploadInfo{}, err + } + return UploadInfo{}, completeMultipartUploadErr + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + + return UploadInfo{ + Bucket: completeMultipartUploadResult.Bucket, + Key: completeMultipartUploadResult.Key, + ETag: trimEtag(completeMultipartUploadResult.ETag), + VersionID: resp.Header.Get(amzVersionID), + Location: completeMultipartUploadResult.Location, + Expiration: expTime, + ExpirationRuleID: ruleID, + + ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, + ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, + ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, + ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, + ChecksumCRC64NVME: completeMultipartUploadResult.ChecksumCRC64NVME, + ChecksumMode: completeMultipartUploadResult.ChecksumType, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go new file mode 100644 index 000000000000..79d0c1dc1ba2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -0,0 +1,801 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "sync" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// putObjectMultipartStream - upload a large object using +// multipart upload and streaming signature for signing payload. +// Comprehensive put object operation involving multipart uploads. +// +// Following code handles these types of readers. +// +// - *minio.Object +// - Any reader which has a method 'ReadAt()' +func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { + if opts.ConcurrentStreamParts && opts.NumThreads > 1 { + info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) + } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { + // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. + info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) + } else { + info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts) + } + if err != nil && s3utils.IsGoogleEndpoint(*c.endpointURL) { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == AccessDenied && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + } + return info, err +} + +// uploadedPartRes - the response received from a part upload. +type uploadedPartRes struct { + Error error // Any error encountered while uploading the part. + PartNum int // Number of the part uploaded. + Size int64 // Size of the part uploaded. + Part ObjectPart +} + +type uploadPartReq struct { + PartNum int // Number of the part uploaded. + Part ObjectPart // Size of the part uploaded. +} + +// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB. +// Supports all readers which implements io.ReaderAt interface +// (ReadAt method). +// +// NOTE: This function is meant to be used for all readers which +// implement io.ReaderAt which allows us for resuming multipart +// uploads but reading at an offset, which would avoid re-read the +// data which was already uploaded. Internally this function uses +// temporary files for staging all the data, these temporary files are +// cleaned automatically when the caller i.e http client closes the +// stream after uploading all the contents successfully. +func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, + reader io.ReaderAt, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + + withChecksum := c.trailingHeaderSupport + + // Aborts the multipart upload in progress, if the + // function returns any error, since we do not resume + // we should purge the parts which have been uploaded + // to relinquish storage space. + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Declare a channel that sends the next part number to be uploaded. + uploadPartsCh := make(chan uploadPartReq) + + // Declare a channel that sends back the response of a part upload. + uploadedPartsCh := make(chan uploadedPartRes) + + // Used for readability, lastPartNumber is always totalPartsCount. + lastPartNumber := totalPartsCount + + partitionCtx, partitionCancel := context.WithCancel(ctx) + defer partitionCancel() + // Send each part number to the channel to be processed. + go func() { + defer close(uploadPartsCh) + + for p := 1; p <= totalPartsCount; p++ { + select { + case <-partitionCtx.Done(): + return + case uploadPartsCh <- uploadPartReq{PartNum: p}: + } + } + }() + + // Receive each part number from the channel allowing three parallel uploads. + for w := 1; w <= opts.getNumThreads(); w++ { + go func(partSize int64) { + for { + var uploadReq uploadPartReq + var ok bool + select { + case <-ctx.Done(): + return + case uploadReq, ok = <-uploadPartsCh: + if !ok { + return + } + // Each worker will draw from the part channel and upload in parallel. + } + + // If partNumber was not uploaded we calculate the missing + // part offset and size. For all other part numbers we + // calculate offset based on multiples of partSize. + readOffset := int64(uploadReq.PartNum-1) * partSize + + // As a special case if partNumber is lastPartNumber, we + // calculate the offset based on the last part size. + if uploadReq.PartNum == lastPartNumber { + readOffset = size - lastPartSize + partSize = lastPartSize + } + + sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) + trailer := make(http.Header, 1) + if withChecksum { + crc := opts.AutoChecksum.Hasher() + trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil))) + sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { + trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(hash)) + }) + } + + // Proceed to upload the part. + p := uploadPartParams{ + bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: sectionReader, + partNumber: uploadReq.PartNum, + size: partSize, + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + sha256Hex: "", + trailer: trailer, + } + objPart, err := c.uploadPart(ctx, p) + if err != nil { + select { + case <-ctx.Done(): + case uploadedPartsCh <- uploadedPartRes{ + Error: err, + }: + } + + // Exit the goroutine. + return + } + + // Save successfully uploaded part metadata. + uploadReq.Part = objPart + + // Send successful part info through the channel. + select { + case <-ctx.Done(): + case uploadedPartsCh <- uploadedPartRes{ + Size: objPart.Size, + PartNum: uploadReq.PartNum, + Part: uploadReq.Part, + }: + } + } + }(partSize) + } + + // Gather the responses as they occur and update any + // progress bar. + allParts := make([]ObjectPart, 0, totalPartsCount) + for u := 1; u <= totalPartsCount; u++ { + select { + case <-ctx.Done(): + return UploadInfo{}, ctx.Err() + case uploadRes := <-uploadedPartsCh: + if uploadRes.Error != nil { + return UploadInfo{}, uploadRes.Error + } + allParts = append(allParts, uploadRes.Part) + // Update the totalUploadedSize. + totalUploadedSize += uploadRes.Size + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, + ChecksumCRC32: uploadRes.Part.ChecksumCRC32, + ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, + ChecksumSHA1: uploadRes.Part.ChecksumSHA1, + ChecksumSHA256: uploadRes.Part.ChecksumSHA256, + ChecksumCRC64NVME: uploadRes.Part.ChecksumCRC64NVME, + }) + } + } + + // Verify if we uploaded all the data. + if totalUploadedSize != size { + return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + if withChecksum { + applyAutoChecksum(&opts, allParts) + } + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + // Initiates a new multipart request + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + + // Aborts the multipart upload if the function returns + // any error, since we do not resume we should purge + // the parts which have been uploaded to relinquish + // storage space. + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + customHeader := make(http.Header) + crc := opts.AutoChecksum.Hasher() + md5Hash := c.md5Hasher() + defer md5Hash.Close() + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + + // Avoid declaring variables in the for loop + var md5Base64 string + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Proceed to upload the part. + if partNumber == totalPartsCount { + partSize = lastPartSize + } + + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + break + } + + if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, rerr + } + + // Calculate md5sum. + if opts.SendContentMd5 { + md5Hash.Reset() + md5Hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) + } + + if opts.AutoChecksum.IsSet() { + // Add CRC32C instead. + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += partSize + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + allParts := make([]ObjectPart, 0, len(partsInfo)) + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + allParts = append(allParts, part) + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + applyAutoChecksum(&opts, allParts) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel. +// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer. +func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string, + reader io.Reader, opts PutObjectOptions, +) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Cancel all when an error occurs. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Initiates a new multipart request + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + + // Aborts the multipart upload if the function returns + // any error, since we do not resume we should purge + // the parts which have been uploaded to relinquish + // storage space. + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + crc := opts.AutoChecksum.Hasher() + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + nBuffers := int64(opts.NumThreads) + bufs := make(chan []byte, nBuffers) + all := make([]byte, nBuffers*partSize) + for i := int64(0); i < nBuffers; i++ { + bufs <- all[i*partSize : i*partSize+partSize] + } + + var wg sync.WaitGroup + var mu sync.Mutex + errCh := make(chan error, opts.NumThreads) + + reader = newHook(reader, opts.Progress) + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Proceed to upload the part. + var buf []byte + select { + case buf = <-bufs: + case err = <-errCh: + cancel() + wg.Wait() + return UploadInfo{}, err + } + + if int64(len(buf)) != partSize { + return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize) + } + + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + // Done + break + } + + if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { + cancel() + wg.Wait() + return UploadInfo{}, rerr + } + + wg.Add(1) + go func(partNumber int) { + // Calculate md5sum. + customHeader := make(http.Header) + if opts.AutoChecksum.IsSet() { + // Add Checksum instead. + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } + } + + // Avoid declaring variables in the for loop + var md5Base64 string + + if opts.SendContentMd5 { + md5Hash := c.md5Hasher() + md5Hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) + md5Hash.Close() + } + + defer wg.Done() + p := uploadPartParams{ + bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: bytes.NewReader(buf[:length]), + partNumber: partNumber, + md5Base64: md5Base64, + size: int64(length), + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + customHeader: customHeader, + } + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + errCh <- uerr + return + } + + // Save successfully uploaded part metadata. + mu.Lock() + partsInfo[partNumber] = objPart + mu.Unlock() + + // Send buffer back so it can be reused. + bufs <- buf + }(partNumber) + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + } + wg.Wait() + + // Collect any error + select { + case err = <-errCh: + return UploadInfo{}, err + default: + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + allParts := make([]ObjectPart, 0, len(partsInfo)) + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + allParts = append(allParts, part) + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + applyAutoChecksum(&opts, allParts) + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// putObject special function used Google Cloud Storage. This special function +// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. +func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Size -1 is only supported on Google Cloud Storage, we error + // out in all other situations. + if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { + return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName) + } + + if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { + return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") + } + + var readSeeker io.Seeker + if size > 0 { + if isReadAt(reader) && !isObject(reader) { + seeker, ok := reader.(io.Seeker) + if ok { + offset, err := seeker.Seek(0, io.SeekCurrent) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + readSeeker = reader.(io.Seeker) + } + } + } + + var md5Base64 string + if opts.SendContentMd5 { + // Calculate md5sum. + hash := c.md5Hasher() + + if readSeeker != nil { + if _, err := io.Copy(hash, reader); err != nil { + return UploadInfo{}, err + } + // Seek back to beginning of io.NewSectionReader's offset. + _, err = readSeeker.Seek(0, io.SeekStart) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } else { + // Create a buffer. + buf := make([]byte, size) + + length, err := readFull(reader, buf) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, err + } + + hash.Write(buf[:length]) + reader = bytes.NewReader(buf[:length]) + } + + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + hash.Close() + } + + // Update progress reader appropriately to the latest offset as we + // read from the source. + progressReader := newHook(reader, opts.Progress) + + // This function does not calculate sha256 and md5sum for payload. + // Execute put object. + return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) +} + +// putObjectDo - executes the put object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + // Set headers. + customHeader := opts.Header() + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, + streamSha256: !opts.DisableContentSha256, + } + // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks. + addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure) + if opts.Checksum.IsSet() { + reqMetadata.addCrc = &opts.Checksum + } else if addCrc { + // If user has added checksums, don't add them ourselves. + for k := range opts.UserMetadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { + addCrc = false + } + } + if addCrc { + opts.AutoChecksum.SetDefault(ChecksumFullObjectCRC32C) + reqMetadata.addCrc = &opts.AutoChecksum + } + } + + if opts.Internal.SourceVersionID != "" { + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } + urlValues := make(url.Values) + urlValues.Set("versionId", opts.Internal.SourceVersionID) + reqMetadata.queryValues = urlValues + } + + // Execute PUT an objectName. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return UploadInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + h := resp.Header + return UploadInfo{ + Bucket: bucketName, + Key: objectName, + ETag: trimEtag(h.Get("ETag")), + VersionID: h.Get(amzVersionID), + Size: size, + Expiration: expTime, + ExpirationRuleID: ruleID, + + // Checksum values + ChecksumCRC32: h.Get(ChecksumCRC32.Key()), + ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()), + ChecksumSHA1: h.Get(ChecksumSHA1.Key()), + ChecksumSHA256: h.Get(ChecksumSHA256.Key()), + ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()), + ChecksumMode: h.Get(ChecksumFullObjectMode.Key()), + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go new file mode 100644 index 000000000000..80f3d61f34a4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -0,0 +1,522 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "sort" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" + "golang.org/x/net/http/httpguts" +) + +// ReplicationStatus represents replication status of object +type ReplicationStatus string + +const ( + // ReplicationStatusPending indicates replication is pending + ReplicationStatusPending ReplicationStatus = "PENDING" + // ReplicationStatusComplete indicates replication completed ok + ReplicationStatusComplete ReplicationStatus = "COMPLETED" + // ReplicationStatusFailed indicates replication failed + ReplicationStatusFailed ReplicationStatus = "FAILED" + // ReplicationStatusReplica indicates object is a replica of a source + ReplicationStatusReplica ReplicationStatus = "REPLICA" + // ReplicationStatusReplicaEdge indicates object is a replica of a edge source + ReplicationStatusReplicaEdge ReplicationStatus = "REPLICA-EDGE" +) + +// Empty returns true if no replication status set. +func (r ReplicationStatus) Empty() bool { + return r == "" +} + +// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition +// implementation on MinIO server +type AdvancedPutOptions struct { + SourceVersionID string + SourceETag string + ReplicationStatus ReplicationStatus + SourceMTime time.Time + ReplicationRequest bool + RetentionTimestamp time.Time + TaggingTimestamp time.Time + LegalholdTimestamp time.Time + ReplicationValidityCheck bool +} + +// PutObjectOptions represents options specified by user for PutObject call +type PutObjectOptions struct { + UserMetadata map[string]string + UserTags map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + Expires time.Time + Mode RetentionMode + RetainUntilDate time.Time + ServerSideEncryption encrypt.ServerSide + NumThreads uint + StorageClass string + WebsiteRedirectLocation string + PartSize uint64 + LegalHold LegalHoldStatus + SendContentMd5 bool + DisableContentSha256 bool + DisableMultipart bool + + // AutoChecksum is the type of checksum that will be added if no other checksum is added, + // like MD5 or SHA256 streaming checksum, and it is feasible for the upload type. + // If none is specified CRC32C is used, since it is generally the fastest. + AutoChecksum ChecksumType + + // Checksum will force a checksum of the specific type. + // This requires that the client was created with "TrailingHeaders:true" option, + // and that the destination server supports it. + // Unavailable with V2 signatures & Google endpoints. + // This will disable content MD5 checksums if set. + Checksum ChecksumType + + // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, + // fill them serially and upload them in parallel. + // This can be used for faster uploads on non-seekable or slow-to-seek input. + ConcurrentStreamParts bool + Internal AdvancedPutOptions + + customHeaders http.Header +} + +// SetMatchETag if etag matches while PUT MinIO returns an error +// this is a MinIO specific extension to support optimistic locking +// semantics. +func (opts *PutObjectOptions) SetMatchETag(etag string) { + if opts.customHeaders == nil { + opts.customHeaders = http.Header{} + } + if etag == "*" { + opts.customHeaders.Set("If-Match", "*") + } else { + opts.customHeaders.Set("If-Match", "\""+etag+"\"") + } +} + +// SetMatchETagExcept if etag does not match while PUT MinIO returns an +// error this is a MinIO specific extension to support optimistic locking +// semantics. +func (opts *PutObjectOptions) SetMatchETagExcept(etag string) { + if opts.customHeaders == nil { + opts.customHeaders = http.Header{} + } + if etag == "*" { + opts.customHeaders.Set("If-None-Match", "*") + } else { + opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") + } +} + +// getNumThreads - gets the number of threads to be used in the multipart +// put object operation +func (opts PutObjectOptions) getNumThreads() (numThreads int) { + if opts.NumThreads > 0 { + numThreads = int(opts.NumThreads) + } else { + numThreads = totalWorkers + } + return numThreads +} + +// Header - constructs the headers from metadata entered by user in +// PutObjectOptions struct +func (opts PutObjectOptions) Header() (header http.Header) { + header = make(http.Header) + + contentType := opts.ContentType + if contentType == "" { + contentType = "application/octet-stream" + } + header.Set("Content-Type", contentType) + + if opts.ContentEncoding != "" { + header.Set("Content-Encoding", opts.ContentEncoding) + } + if opts.ContentDisposition != "" { + header.Set("Content-Disposition", opts.ContentDisposition) + } + if opts.ContentLanguage != "" { + header.Set("Content-Language", opts.ContentLanguage) + } + if opts.CacheControl != "" { + header.Set("Cache-Control", opts.CacheControl) + } + + if !opts.Expires.IsZero() { + header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat)) + } + + if opts.Mode != "" { + header.Set(amzLockMode, opts.Mode.String()) + } + + if !opts.RetainUntilDate.IsZero() { + header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339)) + } + + if opts.LegalHold != "" { + header.Set(amzLegalHoldHeader, opts.LegalHold.String()) + } + + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(header) + } + + if opts.StorageClass != "" { + header.Set(amzStorageClass, opts.StorageClass) + } + + if opts.WebsiteRedirectLocation != "" { + header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation) + } + + if !opts.Internal.ReplicationStatus.Empty() { + header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) + } + if !opts.Internal.SourceMTime.IsZero() { + header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano)) + } + if opts.Internal.SourceETag != "" { + header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) + } + if opts.Internal.ReplicationRequest { + header.Set(minIOBucketReplicationRequest, "true") + } + if opts.Internal.ReplicationValidityCheck { + header.Set(minIOBucketReplicationCheck, "true") + } + if !opts.Internal.LegalholdTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.RetentionTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.TaggingTimestamp.IsZero() { + header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) + } + + if len(opts.UserTags) != 0 { + if tags, _ := tags.NewTags(opts.UserTags, true); tags != nil { + header.Set(amzTaggingHeader, tags.String()) + } + } + + for k, v := range opts.UserMetadata { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { + header.Set(k, v) + } else { + header.Set("x-amz-meta-"+k, v) + } + } + + // set any other additional custom headers. + for k, v := range opts.customHeaders { + header[k] = v + } + + return header +} + +// validate() checks if the UserMetadata map has standard headers or and raises an error if so. +func (opts PutObjectOptions) validate(c *Client) (err error) { + for k, v := range opts.UserMetadata { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { + return errInvalidArgument(k + " unsupported user defined metadata name") + } + if !httpguts.ValidHeaderFieldValue(v) { + return errInvalidArgument(v + " unsupported user defined metadata value") + } + } + if opts.Mode != "" && !opts.Mode.IsValid() { + return errInvalidArgument(opts.Mode.String() + " unsupported retention mode") + } + if opts.LegalHold != "" && !opts.LegalHold.IsValid() { + return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") + } + + checkCrc := false + for k := range opts.UserMetadata { + if strings.HasPrefix(k, "x-amz-checksum-") { + checkCrc = true + break + } + } + + if opts.Checksum.IsSet() || checkCrc { + switch { + case !c.trailingHeaderSupport: + return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled") + case c.overrideSignerType.IsV2(): + return errInvalidArgument("Checksum cannot be used with v2 signatures") + case s3utils.IsGoogleEndpoint(*c.endpointURL): + return errInvalidArgument("Checksum cannot be used with GCS endpoints") + } + } + + return nil +} + +// completedParts is a collection of parts sortable by their part numbers. +// used for sorting the uploaded parts before completing the multipart request. +type completedParts []CompletePart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// PutObject creates an object in a bucket. +// +// You must have WRITE permissions on a bucket to create an object. +// +// - For size smaller than 16MiB PutObject automatically does a +// single atomic PUT operation. +// +// - For size larger than 16MiB PutObject automatically does a +// multipart upload operation. +// +// - For size input as -1 PutObject does a multipart Put operation +// until input stream reaches EOF. Maximum object size that can +// be uploaded through this operation will be 5TiB. +// +// WARNING: Passing down '-1' will use memory and these cannot +// be reused for best outcomes for PutObject(), pass the size always. +// +// NOTE: Upon errors during upload multipart operation is entirely aborted. +func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions, +) (info UploadInfo, err error) { + if size < 0 && opts.DisableMultipart { + return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") + } + + err = opts.validate(c) + if err != nil { + return UploadInfo{}, err + } + + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + if opts.Checksum.IsSet() { + opts.AutoChecksum = opts.Checksum + opts.SendContentMd5 = false + } + + if c.trailingHeaderSupport { + opts.AutoChecksum.SetDefault(ChecksumCRC32C) + addAutoChecksumHeaders(&opts) + } + + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + partSize := opts.PartSize + if opts.PartSize == 0 { + partSize = minPartSize + } + + if c.overrideSignerType.IsV2() { + if size >= 0 && size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) + } + + if size < 0 { + if opts.DisableMultipart { + return UploadInfo{}, errors.New("no length provided and multipart disabled") + } + if opts.ConcurrentStreamParts && opts.NumThreads > 1 { + return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) + } + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) + } + + if size <= int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) +} + +func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + customHeader := make(http.Header) + crc := opts.AutoChecksum.Hasher() + + for partNumber <= totalPartsCount { + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + break + } + + if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { + return UploadInfo{}, rerr + } + + var md5Base64 string + if opts.SendContentMd5 { + // Calculate md5sum. + hash := c.md5Hasher() + hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + hash.Close() + } + + if opts.AutoChecksum.IsSet() { + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(amzChecksumAlgo, opts.AutoChecksum.String()) + if opts.AutoChecksum.FullObjectRequested() { + customHeader.Set(amzChecksumMode, ChecksumFullObjectMode.String()) + } + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Proceed to upload the part. + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rerr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + allParts := make([]ObjectPart, 0, len(partsInfo)) + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + allParts = append(allParts, part) + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + applyAutoChecksum(&opts, allParts) + + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go new file mode 100644 index 000000000000..22e1af370426 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -0,0 +1,246 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "fmt" + "io" + "net/http" + "os" + "strings" + "sync" + "time" + + "github.com/klauspost/compress/s2" +) + +// SnowballOptions contains options for PutObjectsSnowball calls. +type SnowballOptions struct { + // Opts is options applied to all objects. + Opts PutObjectOptions + + // Processing options: + + // InMemory specifies that all objects should be collected in memory + // before they are uploaded. + // If false a temporary file will be created. + InMemory bool + + // Compress enabled content compression before upload. + // Compression will typically reduce memory and network usage, + // Compression can safely be enabled with MinIO hosts. + Compress bool + + // SkipErrs if enabled will skip any errors while reading the + // object content while creating the snowball archive + SkipErrs bool +} + +// SnowballObject contains information about a single object to be added to the snowball. +type SnowballObject struct { + // Key is the destination key, including prefix. + Key string + + // Size is the content size of this object. + Size int64 + + // Modtime to apply to the object. + // If Modtime is the zero value current time will be used. + ModTime time.Time + + // Content of the object. + // Exactly 'Size' number of bytes must be provided. + Content io.Reader + + // VersionID of the object; if empty, a new versionID will be generated + VersionID string + + // Headers contains more options for this object upload, the same as you + // would include in a regular PutObject operation, such as user metadata + // and content-disposition, expires, .. + Headers http.Header + + // Close will be called when an object has finished processing. + // Note that if PutObjectsSnowball returns because of an error, + // objects not consumed from the input will NOT have been closed. + // Leave as nil for no callback. + Close func() +} + +type nopReadSeekCloser struct { + io.ReadSeeker +} + +func (n nopReadSeekCloser) Close() error { + return nil +} + +// This is available as io.ReadSeekCloser from go1.16 +type readSeekCloser interface { + io.Reader + io.Closer + io.Seeker +} + +// PutObjectsSnowball will put multiple objects with a single put call. +// A (compressed) TAR file will be created which will contain multiple objects. +// The key for each object will be used for the destination in the specified bucket. +// Total size should be < 5TB. +// This function blocks until 'objs' is closed and the content has been uploaded. +func (c *Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) { + err = opts.Opts.validate(c) + if err != nil { + return err + } + var tmpWriter io.Writer + var getTmpReader func() (rc readSeekCloser, sz int64, err error) + if opts.InMemory { + b := bytes.NewBuffer(nil) + tmpWriter = b + getTmpReader = func() (readSeekCloser, int64, error) { + return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil + } + } else { + f, err := os.CreateTemp("", "s3-putsnowballobjects-*") + if err != nil { + return err + } + name := f.Name() + tmpWriter = f + var once sync.Once + defer once.Do(func() { + f.Close() + }) + defer os.Remove(name) + getTmpReader = func() (readSeekCloser, int64, error) { + once.Do(func() { + f.Close() + }) + f, err := os.Open(name) + if err != nil { + return nil, 0, err + } + st, err := f.Stat() + if err != nil { + return nil, 0, err + } + return f, st.Size(), nil + } + } + flush := func() error { return nil } + if !opts.Compress { + if !opts.InMemory { + // Insert buffer for writes. + buf := bufio.NewWriterSize(tmpWriter, 1<<20) + flush = buf.Flush + tmpWriter = buf + } + } else { + s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression()) + flush = s2c.Close + defer s2c.Close() + tmpWriter = s2c + } + t := tar.NewWriter(tmpWriter) + +objectLoop: + for { + select { + case <-ctx.Done(): + return ctx.Err() + case obj, ok := <-objs: + if !ok { + break objectLoop + } + + closeObj := func() {} + if obj.Close != nil { + closeObj = obj.Close + } + + // Trim accidental slash prefix. + obj.Key = strings.TrimPrefix(obj.Key, "/") + header := tar.Header{ + Typeflag: tar.TypeReg, + Name: obj.Key, + Size: obj.Size, + ModTime: obj.ModTime, + Format: tar.FormatPAX, + } + if header.ModTime.IsZero() { + header.ModTime = time.Now().UTC() + } + + header.PAXRecords = make(map[string]string) + if obj.VersionID != "" { + header.PAXRecords["minio.versionId"] = obj.VersionID + } + for k, vals := range obj.Headers { + header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",") + } + + if err := t.WriteHeader(&header); err != nil { + closeObj() + return err + } + n, err := io.Copy(t, obj.Content) + if err != nil { + closeObj() + if opts.SkipErrs { + continue + } + return err + } + if n != obj.Size { + closeObj() + if opts.SkipErrs { + continue + } + return io.ErrUnexpectedEOF + } + closeObj() + } + } + // Flush tar + err = t.Flush() + if err != nil { + return err + } + // Flush compression + err = flush() + if err != nil { + return err + } + if opts.Opts.UserMetadata == nil { + opts.Opts.UserMetadata = map[string]string{} + } + opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" + opts.Opts.DisableMultipart = true + rc, sz, err := getTmpReader() + if err != nil { + return err + } + defer rc.Close() + rand := c.random.Uint64() + _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts) + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go new file mode 100644 index 000000000000..9794ffb2bde5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -0,0 +1,719 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io" + "iter" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +//revive:disable + +// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions. +type BucketOptions = RemoveBucketOptions + +//revive:enable + +// RemoveBucketOptions special headers to purge buckets, only +// useful when endpoint is MinIO +type RemoveBucketOptions struct { + ForceDelete bool +} + +// RemoveBucketWithOptions deletes the bucket name. +// +// All objects (including all object versions and delete markers) +// in the bucket will be deleted forcibly if bucket options set +// ForceDelete to 'true'. +func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Build headers. + headers := make(http.Header) + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } + + // Execute DELETE on bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Remove the location from cache on a successful delete. + c.bucketLocCache.Delete(bucketName) + return nil +} + +// RemoveBucket deletes the bucket name. +// +// All objects (including all object versions and delete markers). +// in the bucket must be deleted before successfully attempting this request. +func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + // Execute DELETE on bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Remove the location from cache on a successful delete. + c.bucketLocCache.Delete(bucketName) + + return nil +} + +// AdvancedRemoveOptions intended for internal use by replication +type AdvancedRemoveOptions struct { + ReplicationDeleteMarker bool + ReplicationStatus ReplicationStatus + ReplicationMTime time.Time + ReplicationRequest bool + ReplicationValidityCheck bool // check permissions +} + +// RemoveObjectOptions represents options specified by user for RemoveObject call +type RemoveObjectOptions struct { + ForceDelete bool + GovernanceBypass bool + VersionID string + Internal AdvancedRemoveOptions +} + +// RemoveObject removes an object from a bucket. +func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + res := c.removeObject(ctx, bucketName, objectName, opts) + return res.Err +} + +func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Build headers. + headers := make(http.Header) + + if opts.GovernanceBypass { + // Set the bypass goverenance retention header + headers.Set(amzBypassGovernance, "true") + } + if opts.Internal.ReplicationDeleteMarker { + headers.Set(minIOBucketReplicationDeleteMarker, "true") + } + if !opts.Internal.ReplicationMTime.IsZero() { + headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano)) + } + if !opts.Internal.ReplicationStatus.Empty() { + headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) + } + if opts.Internal.ReplicationRequest { + headers.Set(minIOBucketReplicationRequest, "true") + } + if opts.Internal.ReplicationValidityCheck { + headers.Set(minIOBucketReplicationCheck, "true") + } + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Hex: emptySHA256Hex, + queryValues: urlValues, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return RemoveObjectResult{Err: err} + } + if resp != nil { + // if some unexpected error happened and max retry is reached, we want to let client know + if resp.StatusCode != http.StatusNoContent { + err := httpRespToErrorResponse(resp, bucketName, objectName) + return RemoveObjectResult{Err: err} + } + } + + // DeleteObject always responds with http '204' even for + // objects which do not exist. So no need to handle them + // specifically. + return RemoveObjectResult{ + ObjectName: objectName, + ObjectVersionID: opts.VersionID, + DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true", + DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"), + } +} + +// RemoveObjectError - container of Multi Delete S3 API error +type RemoveObjectError struct { + ObjectName string + VersionID string + Err error +} + +func (err *RemoveObjectError) Error() string { + // This should never happen as we will have a non-nil error with no underlying error. + if err.Err == nil { + return "unexpected remove object error result" + } + return err.Err.Error() +} + +// RemoveObjectResult - container of Multi Delete S3 API result +type RemoveObjectResult struct { + ObjectName string + ObjectVersionID string + + DeleteMarker bool + DeleteMarkerVersionID string + + Err error +} + +// generateRemoveMultiObjects - generate the XML request for remove multi objects request +func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { + delObjects := []deleteObject{} + for _, obj := range objects { + delObjects = append(delObjects, deleteObject{ + Key: obj.Key, + VersionID: obj.VersionID, + }) + } + xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false}) + return xmlBytes +} + +// processRemoveMultiObjectsResponse - parse the remove multi objects web service +// and return the success/failure result status for each object +func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) { + // Parse multi delete XML response + rmResult := &deleteMultiObjectsResult{} + err := xmlDecoder(body, rmResult) + if err != nil { + resultCh <- RemoveObjectResult{ObjectName: "", Err: err} + return + } + + // Fill deletion that returned success + for _, obj := range rmResult.DeletedObjects { + resultCh <- RemoveObjectResult{ + ObjectName: obj.Key, + // Only filled with versioned buckets + ObjectVersionID: obj.VersionID, + DeleteMarker: obj.DeleteMarker, + DeleteMarkerVersionID: obj.DeleteMarkerVersionID, + } + } + + // Fill deletion that returned an error. + for _, obj := range rmResult.UnDeletedObjects { + // Version does not exist is not an error ignore and continue. + switch obj.Code { + case InvalidArgument, NoSuchVersion: + continue + } + resultCh <- RemoveObjectResult{ + ObjectName: obj.Key, + ObjectVersionID: obj.VersionID, + Err: ErrorResponse{ + Code: obj.Code, + Message: obj.Message, + }, + } + } +} + +// RemoveObjectsOptions represents options specified by user for RemoveObjects call +type RemoveObjectsOptions struct { + GovernanceBypass bool +} + +// RemoveObjects removes multiple objects from a bucket while +// it is possible to specify objects versions which are received from +// objectsCh. Remove failures are sent back via error channel. +func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { + errorCh := make(chan RemoveObjectError, 1) + + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(errorCh) + errorCh <- RemoveObjectError{ + Err: err, + } + return errorCh + } + // Validate objects channel to be properly allocated. + if objectsCh == nil { + defer close(errorCh) + errorCh <- RemoveObjectError{ + Err: errInvalidArgument("Objects channel cannot be nil"), + } + return errorCh + } + + resultCh := make(chan RemoveObjectResult, 1) + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + go func() { + defer close(errorCh) + for res := range resultCh { + // Send only errors to the error channel + if res.Err == nil { + continue + } + errorCh <- RemoveObjectError{ + ObjectName: res.ObjectName, + VersionID: res.ObjectVersionID, + Err: res.Err, + } + } + }() + + return errorCh +} + +// RemoveObjectsWithIter bulk deletes multiple objects from a bucket. +// Objects (with optional versions) to be removed must be provided with +// an iterator. Objects are removed asynchronously and results must be +// consumed. If the returned result iterator is stopped, the context is +// canceled, or a remote call failed, the provided iterator will no +// longer accept more objects. +func (c *Client) RemoveObjectsWithIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], opts RemoveObjectsOptions) (iter.Seq[RemoveObjectResult], error) { + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + // Validate objects channel to be properly allocated. + if objectsIter == nil { + return nil, errInvalidArgument("Objects iter can never by nil") + } + + return func(yield func(RemoveObjectResult) bool) { + select { + case <-ctx.Done(): + return + default: + } + + c.removeObjectsIter(ctx, bucketName, objectsIter, yield, opts) + }, nil +} + +// RemoveObjectsWithResult removes multiple objects from a bucket while +// it is possible to specify objects versions which are received from +// objectsCh. Remove results, successes and failures are sent back via +// RemoveObjectResult channel +func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult { + resultCh := make(chan RemoveObjectResult, 1) + + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: err, + } + return resultCh + } + // Validate objects channel to be properly allocated. + if objectsCh == nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: errInvalidArgument("Objects channel cannot be nil"), + } + return resultCh + } + + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + return resultCh +} + +// Return true if the character is within the allowed characters in an XML 1.0 document +// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets +func validXMLChar(r rune) (ok bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xD7FF || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} + +func hasInvalidXMLChar(str string) bool { + for _, s := range str { + if !validXMLChar(s) { + return true + } + } + return false +} + +// Generate and call MultiDelete S3 requests based on entries received from the iterator. +func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) { + maxEntries := 1000 + urlValues := make(url.Values) + urlValues.Set("delete", "") + + // Build headers. + headers := make(http.Header) + if opts.GovernanceBypass { + // Set the bypass goverenance retention header + headers.Set(amzBypassGovernance, "true") + } + + processRemoveMultiObjectsResponseIter := func(batch []ObjectInfo, yield func(RemoveObjectResult) bool) bool { + if len(batch) == 0 { + return false + } + + // Generate remove multi objects XML request + removeBytes := generateRemoveMultiObjectsRequest(batch) + // Execute POST on bucket to remove objects. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Base64: sumMD5Base64(removeBytes), + contentSHA256Hex: sum256Hex(removeBytes), + customHeader: headers, + }) + if resp != nil { + defer closeResponse(resp) + if resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + } + } + if err != nil { + for _, b := range batch { + if !yield(RemoveObjectResult{ + ObjectName: b.Key, + ObjectVersionID: b.VersionID, + Err: err, + }) { + return false + } + } + return false + } + + // Parse multi delete XML response + rmResult := &deleteMultiObjectsResult{} + if err := xmlDecoder(resp.Body, rmResult); err != nil { + yield(RemoveObjectResult{ObjectName: "", Err: err}) + return false + } + + // Fill deletion that returned an error. + for _, obj := range rmResult.UnDeletedObjects { + // Version does not exist is not an error ignore and continue. + switch obj.Code { + case "InvalidArgument", "NoSuchVersion": + continue + } + if !yield(RemoveObjectResult{ + ObjectName: obj.Key, + ObjectVersionID: obj.VersionID, + Err: ErrorResponse{ + Code: obj.Code, + Message: obj.Message, + }, + }) { + return false + } + } + + // Fill deletion that returned success + for _, obj := range rmResult.DeletedObjects { + if !yield(RemoveObjectResult{ + ObjectName: obj.Key, + // Only filled with versioned buckets + ObjectVersionID: obj.VersionID, + DeleteMarker: obj.DeleteMarker, + DeleteMarkerVersionID: obj.DeleteMarkerVersionID, + }) { + return false + } + } + + return true + } + + var batch []ObjectInfo + + next, stop := iter.Pull(objectsIter) + defer stop() + + for { + // Loop over entries by 1000 and call MultiDelete requests + object, ok := next() + if !ok { + // delete the remaining batch. + processRemoveMultiObjectsResponseIter(batch, yield) + return + } + + if hasInvalidXMLChar(object.Key) { + // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. + removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ + VersionID: object.VersionID, + GovernanceBypass: opts.GovernanceBypass, + }) + if err := removeResult.Err; err != nil { + // Version does not exist is not an error ignore and continue. + switch ToErrorResponse(err).Code { + case "InvalidArgument", "NoSuchVersion": + continue + } + } + if !yield(removeResult) { + return + } + + continue + } + + batch = append(batch, object) + if len(batch) < maxEntries { + continue + } + + if !processRemoveMultiObjectsResponseIter(batch, yield) { + return + } + + batch = batch[:0] + } +} + +// Generate and call MultiDelete S3 requests based on entries received from objectsCh +func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) { + maxEntries := 1000 + finish := false + urlValues := make(url.Values) + urlValues.Set("delete", "") + + // Close result channel when Multi delete finishes. + defer close(resultCh) + + // Loop over entries by 1000 and call MultiDelete requests + for !finish { + count := 0 + var batch []ObjectInfo + + // Try to gather 1000 entries + for object := range objectsCh { + if hasInvalidXMLChar(object.Key) { + // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. + removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ + VersionID: object.VersionID, + GovernanceBypass: opts.GovernanceBypass, + }) + if err := removeResult.Err; err != nil { + // Version does not exist is not an error ignore and continue. + switch ToErrorResponse(err).Code { + case InvalidArgument, NoSuchVersion: + continue + } + resultCh <- removeResult + } + + resultCh <- removeResult + continue + } + + batch = append(batch, object) + if count++; count >= maxEntries { + break + } + } + if count == 0 { + // Multi Objects Delete API doesn't accept empty object list, quit immediately + break + } + if count < maxEntries { + // We didn't have 1000 entries, so this is the last batch + finish = true + } + + // Build headers. + headers := make(http.Header) + if opts.GovernanceBypass { + // Set the bypass goverenance retention header + headers.Set(amzBypassGovernance, "true") + } + + // Generate remove multi objects XML request + removeBytes := generateRemoveMultiObjectsRequest(batch) + // Execute POST on bucket to remove objects. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Base64: sumMD5Base64(removeBytes), + contentSHA256Hex: sum256Hex(removeBytes), + customHeader: headers, + expect200OKWithError: true, + }) + + if resp != nil && resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + } + + if err != nil { + for _, b := range batch { + resultCh <- RemoveObjectResult{ + ObjectName: b.Key, + ObjectVersionID: b.VersionID, + Err: err, + } + } + continue + } + + // Process multiobjects remove xml response + processRemoveMultiObjectsResponse(resp.Body, resultCh) + + closeResponse(resp) + } +} + +// RemoveIncompleteUpload aborts an partially uploaded object. +func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + // Find multipart upload ids of the object to be aborted. + uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName) + if err != nil { + return err + } + + for _, uploadID := range uploadIDs { + // abort incomplete multipart upload, based on the upload id passed. + err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + if err != nil { + return err + } + } + + return nil +} + +// abortMultipartUpload aborts a multipart upload for the given +// uploadID, all previously uploaded parts are deleted. +func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Execute DELETE on multipart upload. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + // Abort has no response body, handle it for any errors. + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + // This is needed specifically for abort and it cannot + // be converged into default case. + errorResponse = ErrorResponse{ + Code: NoSuchUpload, + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + default: + return httpRespToErrorResponse(resp, bucketName, objectName) + } + return errorResponse + } + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go new file mode 100644 index 000000000000..9ec8f4f24406 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-restore.go @@ -0,0 +1,182 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018-2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// RestoreType represents the restore request type +type RestoreType string + +const ( + // RestoreSelect represents the restore SELECT operation + RestoreSelect = RestoreType("SELECT") +) + +// TierType represents a retrieval tier +type TierType string + +const ( + // TierStandard is the standard retrieval tier + TierStandard = TierType("Standard") + // TierBulk is the bulk retrieval tier + TierBulk = TierType("Bulk") + // TierExpedited is the expedited retrieval tier + TierExpedited = TierType("Expedited") +) + +// GlacierJobParameters represents the retrieval tier parameter +type GlacierJobParameters struct { + Tier TierType +} + +// Encryption contains the type of server-side encryption used during object retrieval +type Encryption struct { + EncryptionType string + KMSContext string + KMSKeyID string `xml:"KMSKeyId"` +} + +// MetadataEntry represents a metadata information of the restored object. +type MetadataEntry struct { + Name string + Value string +} + +// S3 holds properties of the copy of the archived object +type S3 struct { + AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"` + BucketName string + Prefix string + CannedACL *string `xml:"CannedACL,omitempty"` + Encryption *Encryption `xml:"Encryption,omitempty"` + StorageClass *string `xml:"StorageClass,omitempty"` + Tagging *tags.Tags `xml:"Tagging,omitempty"` + UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"` +} + +// SelectParameters holds the select request parameters +type SelectParameters struct { + XMLName xml.Name `xml:"SelectParameters"` + ExpressionType QueryExpressionType + Expression string + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization +} + +// OutputLocation holds properties of the copy of the archived object +type OutputLocation struct { + XMLName xml.Name `xml:"OutputLocation"` + S3 S3 `xml:"S3"` +} + +// RestoreRequest holds properties of the restore object request +type RestoreRequest struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"` + Type *RestoreType `xml:"Type,omitempty"` + Tier *TierType `xml:"Tier,omitempty"` + Days *int `xml:"Days,omitempty"` + GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"` + Description *string `xml:"Description,omitempty"` + SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"` + OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"` +} + +// SetDays sets the days parameter of the restore request +func (r *RestoreRequest) SetDays(v int) { + r.Days = &v +} + +// SetGlacierJobParameters sets the GlacierJobParameters of the restore request +func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) { + r.GlacierJobParameters = &v +} + +// SetType sets the type of the restore request +func (r *RestoreRequest) SetType(v RestoreType) { + r.Type = &v +} + +// SetTier sets the retrieval tier of the restore request +func (r *RestoreRequest) SetTier(v TierType) { + r.Tier = &v +} + +// SetDescription sets the description of the restore request +func (r *RestoreRequest) SetDescription(v string) { + r.Description = &v +} + +// SetSelectParameters sets SelectParameters of the restore select request +func (r *RestoreRequest) SetSelectParameters(v SelectParameters) { + r.SelectParameters = &v +} + +// SetOutputLocation sets the properties of the copy of the archived object +func (r *RestoreRequest) SetOutputLocation(v OutputLocation) { + r.OutputLocation = &v +} + +// RestoreObject is a implementation of https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API +func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + restoreRequestBytes, err := xml.Marshal(req) + if err != nil { + return err + } + + urlValues := make(url.Values) + urlValues.Set("restore", "") + if versionID != "" { + urlValues.Set("versionId", versionID) + } + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentMD5Base64: sumMD5Base64(restoreRequestBytes), + contentSHA256Hex: sum256Hex(restoreRequestBytes), + contentBody: bytes.NewReader(restoreRequestBytes), + contentLength: int64(len(restoreRequestBytes)), + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go new file mode 100644 index 000000000000..32d589716950 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -0,0 +1,468 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/base64" + "encoding/xml" + "errors" + "io" + "reflect" + "time" +) + +// listAllMyBucketsResult container for listBuckets response. +type listAllMyBucketsResult struct { + // Container for one or more buckets. + Buckets struct { + Bucket []BucketInfo + } + Owner owner +} + +// listAllMyDirectoryBucketsResult container for listDirectoryBuckets response. +type listAllMyDirectoryBucketsResult struct { + Buckets struct { + Bucket []BucketInfo + } + ContinuationToken string +} + +// owner container for bucket owner information. +type owner struct { + DisplayName string + ID string +} + +// CommonPrefix container for prefix response. +type CommonPrefix struct { + Prefix string +} + +// ListBucketV2Result container for listObjects response version 2. +type ListBucketV2Result struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []CommonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + MaxKeys int64 + Name string + + // Hold the token that will be sent in the next request to fetch the next group of keys + NextContinuationToken string + + ContinuationToken string + Prefix string + + // FetchOwner and StartAfter are currently not used + FetchOwner string + StartAfter string +} + +// Version is an element in the list object versions response +type Version struct { + ETag string + IsLatest bool + Key string + LastModified time.Time + Owner Owner + Size int64 + StorageClass string + VersionID string `xml:"VersionId"` + + // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. + // Only returned by MinIO servers. + UserMetadata StringMap `json:"userMetadata,omitempty"` + + // x-amz-tagging values in their k/v values. + // Only returned by MinIO servers. + UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"` + + Internal *struct { + K int // Data blocks + M int // Parity blocks + } `xml:"Internal"` + + // Checksum values. Only returned by AiStor servers. + ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` + ChecksumType string `xml:",omitempty"` + + isDeleteMarker bool +} + +// ListVersionsResult is an element in the list object versions response +// and has a special Unmarshaler because we need to preserver the order +// of and in ListVersionsResult.Versions slice +type ListVersionsResult struct { + Versions []Version + + CommonPrefixes []CommonPrefix + Name string + Prefix string + Delimiter string + MaxKeys int64 + EncodingType string + IsTruncated bool + KeyMarker string + VersionIDMarker string + NextKeyMarker string + NextVersionIDMarker string +} + +// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom +// code will unmarshal and tags and save them in Versions field to +// preserve the lexical order of the listing. +func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) { + for { + // Read tokens from the XML document in a stream. + t, err := d.Token() + if err != nil { + if err == io.EOF { + break + } + return err + } + + se, ok := t.(xml.StartElement) + if ok { + tagName := se.Name.Local + switch tagName { + case "Name", "Prefix", + "Delimiter", "EncodingType", + "KeyMarker", "NextKeyMarker": + var s string + if err = d.DecodeElement(&s, &se); err != nil { + return err + } + v := reflect.ValueOf(l).Elem().FieldByName(tagName) + if v.IsValid() { + v.SetString(s) + } + case "VersionIdMarker": + // VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name + var s string + if err = d.DecodeElement(&s, &se); err != nil { + return err + } + l.VersionIDMarker = s + case "NextVersionIdMarker": + // NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name + var s string + if err = d.DecodeElement(&s, &se); err != nil { + return err + } + l.NextVersionIDMarker = s + case "IsTruncated": // bool + var b bool + if err = d.DecodeElement(&b, &se); err != nil { + return err + } + l.IsTruncated = b + case "MaxKeys": // int64 + var i int64 + if err = d.DecodeElement(&i, &se); err != nil { + return err + } + l.MaxKeys = i + case "CommonPrefixes": + var cp CommonPrefix + if err = d.DecodeElement(&cp, &se); err != nil { + return err + } + l.CommonPrefixes = append(l.CommonPrefixes, cp) + case "DeleteMarker", "Version": + var v Version + if err = d.DecodeElement(&v, &se); err != nil { + return err + } + if tagName == "DeleteMarker" { + v.isDeleteMarker = true + } + l.Versions = append(l.Versions, v) + default: + return errors.New("unrecognized option:" + tagName) + } + } + } + return nil +} + +// ListBucketResult container for listObjects response. +type ListBucketResult struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []CommonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + Marker string + MaxKeys int64 + Name string + + // When response is truncated (the IsTruncated element value in + // the response is true), you can use the key name in this field + // as marker in the subsequent request to get next set of objects. + // Object storage lists objects in alphabetical order Note: This + // element is returned only if you have delimiter request + // parameter specified. If response does not include the NextMaker + // and it is truncated, you can use the value of the last Key in + // the response as the marker in the subsequent request to get the + // next set of object keys. + NextMarker string + Prefix string +} + +// ListMultipartUploadsResult container for ListMultipartUploads response +type ListMultipartUploadsResult struct { + Bucket string + KeyMarker string + UploadIDMarker string `xml:"UploadIdMarker"` + NextKeyMarker string + NextUploadIDMarker string `xml:"NextUploadIdMarker"` + EncodingType string + MaxUploads int64 + IsTruncated bool + Uploads []ObjectMultipartInfo `xml:"Upload"` + Prefix string + Delimiter string + // A response can contain CommonPrefixes only if you specify a delimiter. + CommonPrefixes []CommonPrefix +} + +// initiator container for who initiated multipart upload. +type initiator struct { + ID string + DisplayName string +} + +// copyObjectResult container for copy object response. +type copyObjectResult struct { + ETag string + LastModified time.Time // time string format "2006-01-02T15:04:05.000Z" +} + +// ObjectPart container for particular part of an object. +type ObjectPart struct { + // Part number identifies the part. + PartNumber int + + // Date and time the part was uploaded. + LastModified time.Time + + // Entity tag returned when the part was uploaded, usually md5sum + // of the part. + ETag string + + // Size of the uploaded part data. + Size int64 + + // Checksum values of each part. + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string +} + +// Checksum will return the checksum for the given type. +// Will return the empty string if not set. +func (c ObjectPart) Checksum(t ChecksumType) string { + switch { + case t.Is(ChecksumCRC32C): + return c.ChecksumCRC32C + case t.Is(ChecksumCRC32): + return c.ChecksumCRC32 + case t.Is(ChecksumSHA1): + return c.ChecksumSHA1 + case t.Is(ChecksumSHA256): + return c.ChecksumSHA256 + case t.Is(ChecksumCRC64NVME): + return c.ChecksumCRC64NVME + } + return "" +} + +// ChecksumRaw returns the decoded checksum from the part. +func (c ObjectPart) ChecksumRaw(t ChecksumType) ([]byte, error) { + b64 := c.Checksum(t) + if b64 == "" { + return nil, errors.New("no checksum set") + } + decoded, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return nil, err + } + if len(decoded) != t.RawByteLen() { + return nil, errors.New("checksum length mismatch") + } + return decoded, nil +} + +// ListObjectPartsResult container for ListObjectParts response. +type ListObjectPartsResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` + + Initiator initiator + Owner owner + + StorageClass string + PartNumberMarker int + NextPartNumberMarker int + MaxParts int + + // ChecksumAlgorithm will be CRC32, CRC32C, etc. + ChecksumAlgorithm string + + // ChecksumType is FULL_OBJECT or COMPOSITE (assume COMPOSITE when unset) + ChecksumType string + + // Indicates whether the returned list of parts is truncated. + IsTruncated bool + ObjectParts []ObjectPart `xml:"Part"` + + EncodingType string +} + +// initiateMultipartUploadResult container for InitiateMultiPartUpload +// response. +type initiateMultipartUploadResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` +} + +// completeMultipartUploadResult container for completed multipart +// upload response. +type completeMultipartUploadResult struct { + Location string + Bucket string + Key string + ETag string + + // Checksum values, hash of hashes of parts. + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string + ChecksumType string +} + +// CompletePart sub container lists individual part numbers and their +// md5sum, part of completeMultipartUpload. +type CompletePart struct { + // Part number identifies the part. + PartNumber int + ETag string + + // Checksum values + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` +} + +// Checksum will return the checksum for the given type. +// Will return the empty string if not set. +func (c CompletePart) Checksum(t ChecksumType) string { + switch { + case t.Is(ChecksumCRC32C): + return c.ChecksumCRC32C + case t.Is(ChecksumCRC32): + return c.ChecksumCRC32 + case t.Is(ChecksumSHA1): + return c.ChecksumSHA1 + case t.Is(ChecksumSHA256): + return c.ChecksumSHA256 + case t.Is(ChecksumCRC64NVME): + return c.ChecksumCRC64NVME + } + return "" +} + +// completeMultipartUpload container for completing multipart upload. +type completeMultipartUpload struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` + Parts []CompletePart `xml:"Part"` +} + +// createBucketConfiguration container for bucket configuration. +type createBucketConfiguration struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` + Location string `xml:"LocationConstraint"` +} + +// deleteObject container for Delete element in MultiObjects Delete XML request +type deleteObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` +} + +// deletedObject container for Deleted element in MultiObjects Delete XML response +type deletedObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` + // These fields are ignored. + DeleteMarker bool + DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"` +} + +// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response +type nonDeletedObject struct { + Key string + Code string + Message string + VersionID string `xml:"VersionId"` +} + +// deletedMultiObjects container for MultiObjects Delete XML request +type deleteMultiObjects struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool + Objects []deleteObject `xml:"Object"` +} + +// deletedMultiObjectsResult container for MultiObjects Delete XML response +type deleteMultiObjectsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjects []deletedObject `xml:"Deleted"` + UnDeletedObjects []nonDeletedObject `xml:"Error"` +} diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go new file mode 100644 index 000000000000..4fb4db9ba311 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-select.go @@ -0,0 +1,755 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// CSVFileHeaderInfo - is the parameter for whether to utilize headers. +type CSVFileHeaderInfo string + +// Constants for file header info. +const ( + CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" + CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE" + CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE" +) + +// SelectCompressionType - is the parameter for what type of compression is +// present +type SelectCompressionType string + +// Constants for compression types under select API. +const ( + SelectCompressionNONE SelectCompressionType = "NONE" + SelectCompressionGZIP SelectCompressionType = "GZIP" + SelectCompressionBZIP SelectCompressionType = "BZIP2" + + // Non-standard compression schemes, supported by MinIO hosts: + + SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression. + SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream + SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream + SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream +) + +// CSVQuoteFields - is the parameter for how CSV fields are quoted. +type CSVQuoteFields string + +// Constants for csv quote styles. +const ( + CSVQuoteFieldsAlways CSVQuoteFields = "Always" + CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded" +) + +// QueryExpressionType - is of what syntax the expression is, this should only +// be SQL +type QueryExpressionType string + +// Constants for expression type. +const ( + QueryExpressionTypeSQL QueryExpressionType = "SQL" +) + +// JSONType determines json input serialization type. +type JSONType string + +// Constants for JSONTypes. +const ( + JSONDocumentType JSONType = "DOCUMENT" + JSONLinesType JSONType = "LINES" +) + +// ParquetInputOptions parquet input specific options +type ParquetInputOptions struct{} + +// CSVInputOptions csv input specific options +type CSVInputOptions struct { + FileHeaderInfo CSVFileHeaderInfo + fileHeaderInfoSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet bool + + Comments string + commentsSet bool +} + +// SetFileHeaderInfo sets the file header info in the CSV input options +func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) { + c.FileHeaderInfo = val + c.fileHeaderInfoSet = true +} + +// SetRecordDelimiter sets the record delimiter in the CSV input options +func (c *CSVInputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter in the CSV input options +func (c *CSVInputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV input options +func (c *CSVInputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options +func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// SetComments sets the comments character in the CSV input options +func (c *CSVInputOptions) SetComments(val string) { + c.Comments = val + c.commentsSet = true +} + +// MarshalXML - produces the xml representation of the CSV input options struct +func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + if c.FileHeaderInfo != "" || c.fileHeaderInfoSet { + if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + if c.Comments != "" || c.commentsSet { + if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// CSVOutputOptions csv output specific options +type CSVOutputOptions struct { + QuoteFields CSVQuoteFields + quoteFieldsSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet bool +} + +// SetQuoteFields sets the quote field parameter in the CSV output options +func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { + c.QuoteFields = val + c.quoteFieldsSet = true +} + +// SetRecordDelimiter sets the record delimiter character in the CSV output options +func (c *CSVOutputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter character in the CSV output options +func (c *CSVOutputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV output options +func (c *CSVOutputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options +func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// MarshalXML - produces the xml representation of the CSVOutputOptions struct +func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if c.QuoteFields != "" || c.quoteFieldsSet { + if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONInputOptions json input specific options +type JSONInputOptions struct { + Type JSONType + typeSet bool +} + +// SetType sets the JSON type in the JSON input options +func (j *JSONInputOptions) SetType(typ JSONType) { + j.Type = typ + j.typeSet = true +} + +// MarshalXML - produces the xml representation of the JSONInputOptions struct +func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.Type != "" || j.typeSet { + if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONOutputOptions - json output specific options +type JSONOutputOptions struct { + RecordDelimiter string + recordDelimiterSet bool +} + +// SetRecordDelimiter sets the record delimiter in the JSON output options +func (j *JSONOutputOptions) SetRecordDelimiter(val string) { + j.RecordDelimiter = val + j.recordDelimiterSet = true +} + +// MarshalXML - produces the xml representation of the JSONOutputOptions struct +func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.RecordDelimiter != "" || j.recordDelimiterSet { + if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// SelectObjectInputSerialization - input serialization parameters +type SelectObjectInputSerialization struct { + CompressionType SelectCompressionType `xml:"CompressionType,omitempty"` + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOutputSerialization - output serialization parameters. +type SelectObjectOutputSerialization struct { + CSV *CSVOutputOptions `xml:"CSV,omitempty"` + JSON *JSONOutputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOptions - represents the input select body +type SelectObjectOptions struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + ServerSideEncryption encrypt.ServerSide `xml:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization + RequestProgress struct { + Enabled bool + } +} + +// Header returns the http.Header representation of the SelectObject options. +func (o SelectObjectOptions) Header() http.Header { + headers := make(http.Header) + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// SelectObjectType - is the parameter which defines what type of object the +// operation is being performed on. +type SelectObjectType string + +// Constants for input data types. +const ( + SelectObjectTypeCSV SelectObjectType = "CSV" + SelectObjectTypeJSON SelectObjectType = "JSON" + SelectObjectTypeParquet SelectObjectType = "Parquet" +) + +// preludeInfo is used for keeping track of necessary information from the +// prelude. +type preludeInfo struct { + totalLen uint32 + headerLen uint32 +} + +// SelectResults is used for the streaming responses from the server. +type SelectResults struct { + pipeReader *io.PipeReader + resp *http.Response + stats *StatsMessage + progress *ProgressMessage +} + +// ProgressMessage is a struct for progress xml message. +type ProgressMessage struct { + XMLName xml.Name `xml:"Progress" json:"-"` + StatsMessage +} + +// StatsMessage is a struct for stat xml message. +type StatsMessage struct { + XMLName xml.Name `xml:"Stats" json:"-"` + BytesScanned int64 + BytesProcessed int64 + BytesReturned int64 +} + +// messageType represents the type of message. +type messageType string + +const ( + errorMsg messageType = "error" + commonMsg messageType = "event" +) + +// eventType represents the type of event. +type eventType string + +// list of event-types returned by Select API. +const ( + endEvent eventType = "End" + recordsEvent eventType = "Records" + progressEvent eventType = "Progress" + statsEvent eventType = "Stats" +) + +// contentType represents content type of event. +type contentType string + +const ( + xmlContent contentType = "text/xml" +) + +// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. +func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + selectReqBytes, err := xml.Marshal(opts) + if err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Set("select", "") + urlValues.Set("select-type", "2") + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: opts.Header(), + contentMD5Base64: sumMD5Base64(selectReqBytes), + contentSHA256Hex: sum256Hex(selectReqBytes), + contentBody: bytes.NewReader(selectReqBytes), + contentLength: int64(len(selectReqBytes)), + }) + if err != nil { + return nil, err + } + + return NewSelectResults(resp, bucketName) +} + +// NewSelectResults creates a Select Result parser that parses the response +// and returns a Reader that will return parsed and assembled select output. +func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + pipeReader, pipeWriter := io.Pipe() + streamer := &SelectResults{ + resp: resp, + stats: &StatsMessage{}, + progress: &ProgressMessage{}, + pipeReader: pipeReader, + } + streamer.start(pipeWriter) + return streamer, nil +} + +// Close - closes the underlying response body and the stream reader. +func (s *SelectResults) Close() error { + defer closeResponse(s.resp) + return s.pipeReader.Close() +} + +// Read - is a reader compatible implementation for SelectObjectContent records. +func (s *SelectResults) Read(b []byte) (n int, err error) { + return s.pipeReader.Read(b) +} + +// Stats - information about a request's stats when processing is complete. +func (s *SelectResults) Stats() *StatsMessage { + return s.stats +} + +// Progress - information about the progress of a request. +func (s *SelectResults) Progress() *ProgressMessage { + return s.progress +} + +// start is the main function that decodes the large byte array into +// several events that are sent through the eventstream. +func (s *SelectResults) start(pipeWriter *io.PipeWriter) { + go func() { + for { + var prelude preludeInfo + headers := make(http.Header) + var err error + + // Create CRC code + crc := crc32.New(crc32.IEEETable) + crcReader := io.TeeReader(s.resp.Body, crc) + + // Extract the prelude(12 bytes) into a struct to extract relevant information. + prelude, err = processPrelude(crcReader, crc) + if err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + // Extract the headers(variable bytes) into a struct to extract relevant information + if prelude.headerLen > 0 { + if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + } + + // Get the actual payload length so that the appropriate amount of + // bytes can be read or parsed. + payloadLen := prelude.PayloadLen() + + m := messageType(headers.Get("message-type")) + + switch m { + case errorMsg: + pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) + closeResponse(s.resp) + return + case commonMsg: + // Get content-type of the payload. + c := contentType(headers.Get("content-type")) + + // Get event type of the payload. + e := eventType(headers.Get("event-type")) + + // Handle all supported events. + switch e { + case endEvent: + pipeWriter.Close() + closeResponse(s.resp) + return + case recordsEvent: + if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + case progressEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) + closeResponse(s.resp) + return + } + case statsEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) + closeResponse(s.resp) + return + } + } + } + + // Ensures that the full message's CRC is correct and + // that the message is not corrupted + if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + } + }() +} + +// PayloadLen is a function that calculates the length of the payload. +func (p preludeInfo) PayloadLen() int64 { + return int64(p.totalLen - p.headerLen - 16) +} + +// processPrelude is the function that reads the 12 bytes of the prelude and +// ensures the CRC is correct while also extracting relevant information into +// the struct, +func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { + var err error + pInfo := preludeInfo{} + + // reads total length of the message (first 4 bytes) + pInfo.totalLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // reads total header length of the message (2nd 4 bytes) + pInfo.headerLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // checks that the CRC is correct (3rd 4 bytes) + preCRC := crc.Sum32() + if err := checkCRC(prelude, preCRC); err != nil { + return pInfo, err + } + + return pInfo, nil +} + +// extracts the relevant information from the Headers. +func extractHeader(body io.Reader, myHeaders http.Header) error { + for { + // extracts the first part of the header, + headerTypeName, err := extractHeaderType(body) + if err != nil { + // Since end of file, we have read all of our headers + if err == io.EOF { + break + } + return err + } + + // reads the 7 present in the header and ignores it. + extractUint8(body) + + headerValueName, err := extractHeaderValue(body) + if err != nil { + return err + } + + myHeaders.Set(headerTypeName, headerValueName) + } + return nil +} + +// extractHeaderType extracts the first half of the header message, the header type. +func extractHeaderType(body io.Reader) (string, error) { + // extracts 2 bit integer + headerNameLen, err := extractUint8(body) + if err != nil { + return "", err + } + // extracts the string with the appropriate number of bytes + headerName, err := extractString(body, int(headerNameLen)) + if err != nil { + return "", err + } + return strings.TrimPrefix(headerName, ":"), nil +} + +// extractsHeaderValue extracts the second half of the header message, the +// header value +func extractHeaderValue(body io.Reader) (string, error) { + bodyLen, err := extractUint16(body) + if err != nil { + return "", err + } + bodyName, err := extractString(body, int(bodyLen)) + if err != nil { + return "", err + } + return bodyName, nil +} + +// extracts a string from byte array of a particular number of bytes. +func extractString(source io.Reader, lenBytes int) (string, error) { + myVal := make([]byte, lenBytes) + _, err := source.Read(myVal) + if err != nil { + return "", err + } + return string(myVal), nil +} + +// extractUint32 extracts a 4 byte integer from the byte array. +func extractUint32(r io.Reader) (uint32, error) { + buf := make([]byte, 4) + _, err := readFull(r, buf) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(buf), nil +} + +// extractUint16 extracts a 2 byte integer from the byte array. +func extractUint16(r io.Reader) (uint16, error) { + buf := make([]byte, 2) + _, err := readFull(r, buf) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(buf), nil +} + +// extractUint8 extracts a 1 byte integer from the byte array. +func extractUint8(r io.Reader) (uint8, error) { + buf := make([]byte, 1) + _, err := readFull(r, buf) + if err != nil { + return 0, err + } + return buf[0], nil +} + +// checkCRC ensures that the CRC matches with the one from the reader. +func checkCRC(r io.Reader, expect uint32) error { + msgCRC, err := extractUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go new file mode 100644 index 000000000000..a4b2af7aefce --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -0,0 +1,124 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to +// control cancellations and timeouts. +func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return false, err + } + + // Execute HEAD on bucketName. + resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + if ToErrorResponse(err).Code == NoSuchBucket { + return false, nil + } + return false, err + } + if resp != nil { + resperr := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(resperr).Code == NoSuchBucket { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, httpRespToErrorResponse(resp, bucketName, "") + } + } + return true, nil +} + +// StatObject verifies if object exists, you have permission to access it +// and returns information about the object. +func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: InvalidBucketName, + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: XMinioInvalidObjectName, + Message: err.Error(), + } + } + headers := opts.Header() + if opts.Internal.ReplicationDeleteMarker { + headers.Set(minIOBucketReplicationDeleteMarker, "true") + } + if opts.Internal.IsReplicationReadyForDeleteMarker { + headers.Set(isMinioTgtReplicationReady, "true") + } + + // Execute HEAD on objectName. + resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: opts.toQueryValues(), + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + + if resp != nil { + deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" + replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true" + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + Code: MethodNotAllowed, + Message: s3ErrorResponseMap[MethodNotAllowed], + BucketName: bucketName, + Key: objectName, + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + }, errResp + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + ReplicationReady: replicationReady, // whether delete marker can be replicated + }, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return ToObjectInfo(bucketName, objectName, resp.Header) +} diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go new file mode 100644 index 000000000000..5352d793b878 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -0,0 +1,1138 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/http/cookiejar" + "net/http/httptrace" + "net/http/httputil" + "net/url" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/dustin/go-humanize" + md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/kvcache" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" + "github.com/minio/minio-go/v7/pkg/singleflight" + "golang.org/x/net/publicsuffix" +) + +// Client implements Amazon S3 compatible methods. +type Client struct { + // Standard options. + + // Parsed endpoint url provided by the user. + endpointURL *url.URL + + // Holds various credential providers. + credsProvider *credentials.Credentials + + // Custom signerType value overrides all credentials. + overrideSignerType credentials.SignatureType + + // User supplied. + appInfo struct { + appName string + appVersion string + } + + // Indicate whether we are using https or not + secure bool + + // Needs allocation. + httpClient *http.Client + httpTrace *httptrace.ClientTrace + bucketLocCache *kvcache.Cache[string, string] + bucketSessionCache *kvcache.Cache[string, credentials.Value] + credsGroup singleflight.Group[string, credentials.Value] + + // Advanced functionality. + isTraceEnabled bool + traceErrorsOnly bool + traceOutput io.Writer + + // S3 specific accelerated endpoint. + s3AccelerateEndpoint string + // S3 dual-stack endpoints are enabled by default. + s3DualstackEnabled bool + + // Region endpoint + region string + + // Random seed. + random *rand.Rand + + // lookup indicates type of url lookup supported by server. If not specified, + // default to Auto. + lookup BucketLookupType + + // lookupFn is a custom function to return URL lookup type supported by the server. + lookupFn func(u url.URL, bucketName string) BucketLookupType + + // Factory for MD5 hash functions. + md5Hasher func() md5simd.Hasher + sha256Hasher func() md5simd.Hasher + + healthStatus int32 + + trailingHeaderSupport bool + maxRetries int +} + +// Options for New method +type Options struct { + Creds *credentials.Credentials + Secure bool + Transport http.RoundTripper + Trace *httptrace.ClientTrace + Region string + BucketLookup BucketLookupType + + // Allows setting a custom region lookup based on URL pattern + // not all URL patterns are covered by this library so if you + // have a custom endpoints with many regions you can use this + // function to perform region lookups appropriately. + CustomRegionViaURL func(u url.URL) string + + // Provide a custom function that returns BucketLookupType based + // on the input URL, this is just like s3utils.IsVirtualHostSupported() + // function but allows users to provide their own implementation. + // Once this is set it overrides all settings for opts.BucketLookup + // if this function returns BucketLookupAuto then default detection + // via s3utils.IsVirtualHostSupported() is used, otherwise the + // function is expected to return appropriate value as expected for + // the URL the user wishes to honor. + // + // BucketName is passed additionally for the caller to ensure + // handle situations where `bucketNames` have multiple `.` separators + // in such case HTTPs certs will not work properly for *. + // wildcards, so you need to specifically handle these situations + // and not return bucket as part of DNS since those requests may fail. + // + // For better understanding look at s3utils.IsVirtualHostSupported() + // implementation. + BucketLookupViaURL func(u url.URL, bucketName string) BucketLookupType + + // TrailingHeaders indicates server support of trailing headers. + // Only supported for v4 signatures. + TrailingHeaders bool + + // Custom hash routines. Leave nil to use standard. + CustomMD5 func() md5simd.Hasher + CustomSHA256 func() md5simd.Hasher + + // Number of times a request is retried. Defaults to 10 retries if this option is not configured. + // Set to 1 to disable retries. + MaxRetries int +} + +// Global constants. +const ( + libraryName = "minio-go" + libraryVersion = "v7.0.96" +) + +// User Agent should always following the below style. +// Please open an issue to discuss any new changes here. +// +// MinIO (OS; ARCH) LIB/VER APP/VER +const ( + libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " + libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion +) + +// BucketLookupType is type of url lookup supported by server. +type BucketLookupType int + +// Different types of url lookup supported by the server.Initialized to BucketLookupAuto +const ( + BucketLookupAuto BucketLookupType = iota + BucketLookupDNS + BucketLookupPath +) + +// New - instantiate minio client with options +func New(endpoint string, opts *Options) (*Client, error) { + if opts == nil { + return nil, errors.New("no options provided") + } + clnt, err := privateNew(endpoint, opts) + if err != nil { + return nil, err + } + if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { + // If Amazon S3 set to signature v4. + clnt.overrideSignerType = credentials.SignatureV4 + // Amazon S3 endpoints are resolved into dual-stack endpoints by default + // for backwards compatibility. + clnt.s3DualstackEnabled = true + } + + return clnt, nil +} + +// EndpointURL returns the URL of the S3-compatible endpoint that this client connects to. +// +// Returns a copy of the endpoint URL to prevent modification of internal state. +func (c *Client) EndpointURL() *url.URL { + endpoint := *c.endpointURL // copy to prevent callers from modifying internal state + return &endpoint +} + +// lockedRandSource provides protected rand source, implements rand.Source interface. +type lockedRandSource struct { + lk sync.Mutex + src rand.Source +} + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. +func (r *lockedRandSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return n +} + +// Seed uses the provided seed value to initialize the generator to a +// deterministic state. +func (r *lockedRandSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +func privateNew(endpoint string, opts *Options) (*Client, error) { + // construct endpoint. + endpointURL, err := getEndpointURL(endpoint, opts.Secure) + if err != nil { + return nil, err + } + + // Initialize cookies to preserve server sent cookies if any and replay + // them upon each request. + jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + if err != nil { + return nil, err + } + + // instantiate new Client. + clnt := new(Client) + + // Save the credentials. + clnt.credsProvider = opts.Creds + + // Remember whether we are using https or not + clnt.secure = opts.Secure + + // Save endpoint URL, user agent for future uses. + clnt.endpointURL = endpointURL + + transport := opts.Transport + if transport == nil { + transport, err = DefaultTransport(opts.Secure) + if err != nil { + return nil, err + } + } + + clnt.httpTrace = opts.Trace + + // Instantiate http client and bucket location cache. + clnt.httpClient = &http.Client{ + Jar: jar, + Transport: transport, + CheckRedirect: func(_ *http.Request, _ []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + // Sets custom region, if region is empty bucket location cache is used automatically. + if opts.Region == "" { + if opts.CustomRegionViaURL != nil { + opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL) + } else { + opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL) + } + } + clnt.region = opts.Region + + // Initialize bucket region cache. + clnt.bucketLocCache = &kvcache.Cache[string, string]{} + + // Initialize bucket session cache (s3 express). + clnt.bucketSessionCache = &kvcache.Cache[string, credentials.Value]{} + + // Introduce a new locked random seed. + clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + + // Add default md5 hasher. + clnt.md5Hasher = opts.CustomMD5 + clnt.sha256Hasher = opts.CustomSHA256 + if clnt.md5Hasher == nil { + clnt.md5Hasher = newMd5Hasher + } + if clnt.sha256Hasher == nil { + clnt.sha256Hasher = newSHA256Hasher + } + + clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4() + + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined + // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. + clnt.lookup = opts.BucketLookup + clnt.lookupFn = opts.BucketLookupViaURL + + // healthcheck is not initialized + clnt.healthStatus = unknown + + clnt.maxRetries = MaxRetry + if opts.MaxRetries > 0 { + clnt.maxRetries = opts.MaxRetries + } + + // Return. + return clnt, nil +} + +// SetAppInfo adds custom application name and version to the User-Agent header for all requests. +// This helps identify your application in server logs and metrics. +// +// Parameters: +// - appName: Name of the application +// - appVersion: Version of the application +// +// Both parameters must be non-empty for the custom User-Agent to be set. +func (c *Client) SetAppInfo(appName, appVersion string) { + // if app name and version not set, we do not set a new user agent. + if appName != "" && appVersion != "" { + c.appInfo.appName = appName + c.appInfo.appVersion = appVersion + } +} + +// TraceOn enables HTTP request and response tracing for debugging purposes. +// All HTTP traffic will be written to the provided output stream. +// +// Parameters: +// - outputStream: Writer where trace output will be written (defaults to os.Stdout if nil) +func (c *Client) TraceOn(outputStream io.Writer) { + // if outputStream is nil then default to os.Stdout. + if outputStream == nil { + outputStream = os.Stdout + } + // Sets a new output stream. + c.traceOutput = outputStream + + // Enable tracing. + c.isTraceEnabled = true +} + +// TraceErrorsOnlyOn enables HTTP tracing but only for requests that result in errors. +// This is useful for debugging without the overhead of tracing all requests. +// +// Parameters: +// - outputStream: Writer where trace output will be written (defaults to os.Stdout if nil) +func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { + c.TraceOn(outputStream) + c.traceErrorsOnly = true +} + +// TraceErrorsOnlyOff disables errors-only mode and traces all requests. +// To disable all tracing, call TraceOff() instead. +func (c *Client) TraceErrorsOnlyOff() { + c.traceErrorsOnly = false +} + +// TraceOff disables all HTTP tracing (both normal and errors-only modes). +func (c *Client) TraceOff() { + // Disable tracing. + c.isTraceEnabled = false + c.traceErrorsOnly = false +} + +// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your +// requests. This feature is only specific to S3 for all other endpoints this +// function does nothing. To read further details on s3 transfer acceleration +// please vist - +// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + c.s3AccelerateEndpoint = accelerateEndpoint + } +} + +// SetS3EnableDualstack turns s3 dual-stack endpoints on or off for all requests. +// The feature is only specific to S3 and is on by default. To read more about +// Amazon S3 dual-stack endpoints visit - +// https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html +func (c *Client) SetS3EnableDualstack(enabled bool) { + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + c.s3DualstackEnabled = enabled + } +} + +// Hash materials provides relevant initialized hash algo writers +// based on the expected signature type. +// +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. +func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { + hashSums = make(map[string][]byte) + hashAlgos = make(map[string]md5simd.Hasher) + if c.overrideSignerType.IsV4() { + if c.secure { + hashAlgos["md5"] = c.md5Hasher() + } else { + if isSha256Requested { + hashAlgos["sha256"] = c.sha256Hasher() + } + } + } else { + if c.overrideSignerType.IsAnonymous() { + hashAlgos["md5"] = c.md5Hasher() + } + } + if isMd5Requested { + hashAlgos["md5"] = c.md5Hasher() + } + return hashAlgos, hashSums +} + +const ( + unknown = -1 + offline = 0 + online = 1 +) + +// IsOnline returns true if healthcheck enabled and client is online. +// If HealthCheck function has not been called this will always return true. +func (c *Client) IsOnline() bool { + return !c.IsOffline() +} + +// sets online healthStatus to offline +func (c *Client) markOffline() { + atomic.CompareAndSwapInt32(&c.healthStatus, online, offline) +} + +// IsOffline returns true if healthcheck enabled and client is offline +// If HealthCheck function has not been called this will always return false. +func (c *Client) IsOffline() bool { + return atomic.LoadInt32(&c.healthStatus) == offline +} + +// HealthCheck starts a healthcheck to see if endpoint is up. +// Returns a context cancellation function, to stop the health check, +// and an error if health check is already started. +func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) { + if atomic.LoadInt32(&c.healthStatus) != unknown { + return nil, fmt.Errorf("health check is running") + } + if hcDuration < 1*time.Second { + return nil, fmt.Errorf("health check duration should be at least 1 second") + } + probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-") + ctx, cancelFn := context.WithCancel(context.Background()) + atomic.StoreInt32(&c.healthStatus, offline) + { + // Change to online, if we can connect. + gctx, gcancel := context.WithTimeout(ctx, 3*time.Second) + _, err := c.getBucketLocation(gctx, probeBucketName) + gcancel() + if !IsNetworkOrHostDown(err, false) { + switch ToErrorResponse(err).Code { + case NoSuchBucket, AccessDenied, "": + atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) + } + } + } + + go func(duration time.Duration) { + timer := time.NewTimer(duration) + defer timer.Stop() + for { + select { + case <-ctx.Done(): + atomic.StoreInt32(&c.healthStatus, unknown) + return + case <-timer.C: + // Do health check the first time and ONLY if the connection is marked offline + if c.IsOffline() { + gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second) + _, err := c.getBucketLocation(gctx, probeBucketName) + gcancel() + if !IsNetworkOrHostDown(err, false) { + switch ToErrorResponse(err).Code { + case NoSuchBucket, AccessDenied, "": + atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) + } + } + } + + timer.Reset(duration) + } + } + }(hcDuration) + return cancelFn, nil +} + +// requestMetadata - is container for all the values to make a request. +type requestMetadata struct { + // If set newRequest presigns the URL. + presignURL bool + + // User supplied. + bucketName string + objectName string + queryValues url.Values + customHeader http.Header + extraPresignHeader http.Header + expires int64 + + // Generated by our internal code. + bucketLocation string + contentBody io.Reader + contentLength int64 + contentMD5Base64 string // carries base64 encoded md5sum + contentSHA256Hex string // carries hex encoded sha256sum + streamSha256 bool + addCrc *ChecksumType + trailer http.Header // (http.Request).Trailer. Requires v4 signature. + + expect200OKWithError bool +} + +// dumpHTTP - dump HTTP request and response. +func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error { + // Starts http dump. + _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") + if err != nil { + return err + } + + // Filter out Signature field from Authorization header. + origAuth := req.Header.Get("Authorization") + if origAuth != "" { + req.Header.Set("Authorization", redactSignature(origAuth)) + } + + // Only display request header. + reqTrace, err := httputil.DumpRequestOut(req, false) + if err != nil { + return err + } + + // Write request to trace output. + _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) + if err != nil { + return err + } + + // Only display response header. + var respTrace []byte + + // For errors we make sure to dump response body as well. + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusPartialContent && + resp.StatusCode != http.StatusNoContent { + respTrace, err = httputil.DumpResponse(resp, true) + if err != nil { + return err + } + } else { + respTrace, err = httputil.DumpResponse(resp, false) + if err != nil { + return err + } + } + + // Write response to trace output. + _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + if err != nil { + return err + } + + // Ends the http dump. + _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + if err != nil { + return err + } + + // Returns success. + return nil +} + +// do - execute http request. +func (c *Client) do(req *http.Request) (resp *http.Response, err error) { + defer func() { + if IsNetworkOrHostDown(err, false) { + c.markOffline() + } + }() + + resp, err = c.httpClient.Do(req) + if err != nil { + // Handle this specifically for now until future Golang versions fix this issue properly. + if urlErr, ok := err.(*url.Error); ok { + if strings.Contains(urlErr.Err.Error(), "EOF") { + return nil, &url.Error{ + Op: urlErr.Op, + URL: urlErr.URL, + Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), + } + } + } + return nil, err + } + + // Response cannot be non-nil, report error if thats the case. + if resp == nil { + msg := "Response is empty. " + reportIssue + return nil, errInvalidArgument(msg) + } + + // If trace is enabled, dump http request and response, + // except when the traceErrorsOnly enabled and the response's status code is ok + if c.isTraceEnabled && (!c.traceErrorsOnly || resp.StatusCode != http.StatusOK) { + err = c.dumpHTTP(req, resp) + if err != nil { + return nil, err + } + } + + return resp, nil +} + +// List of success status. +var successStatus = map[int]struct{}{ + http.StatusOK: {}, + http.StatusNoContent: {}, + http.StatusPartialContent: {}, +} + +// executeMethod - instantiates a given method, and retries the +// request upon any error up to maxRetries attempts in a binomially +// delayed manner using a standard back off algorithm. +func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { + if c.IsOffline() { + return nil, errors.New(c.endpointURL.String() + " is offline.") + } + + var retryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + reqRetry := c.maxRetries // Indicates how many times we can retry the request + + if metadata.contentBody != nil { + // Check if body is seekable then it is retryable. + bodySeeker, retryable = metadata.contentBody.(io.Seeker) + switch bodySeeker { + case os.Stdin, os.Stdout, os.Stderr: + retryable = false + } + // Retry only when reader is seekable + if !retryable { + reqRetry = 1 + } + + // Figure out if the body can be closed - if yes + // we will definitely close it upon the function + // return. + bodyCloser, ok := metadata.contentBody.(io.Closer) + if ok { + defer bodyCloser.Close() + } + } + + if metadata.addCrc != nil && metadata.contentLength > 0 { + if metadata.trailer == nil { + metadata.trailer = make(http.Header, 1) + } + crc := metadata.addCrc.Hasher() + metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { + // Update trailer when done. + metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(hash)) + }) + metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil))) + } + + for range c.newRetryTimer(ctx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { + // Retry executes the following function body if request has an + // error until maxRetries have been exhausted, retry attempts are + // performed after waiting for a given period of time in a + // binomial fashion. + if retryable { + // Seek back to beginning for each attempt. + if _, err = bodySeeker.Seek(0, 0); err != nil { + // If seek failed, no need to retry. + return nil, err + } + } + + // Instantiate a new request. + var req *http.Request + req, err = c.newRequest(ctx, method, metadata) + if err != nil { + errResponse := ToErrorResponse(err) + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + return nil, err + } + + // Initiate the request. + res, err = c.do(req) + if err != nil { + if isRequestErrorRetryable(ctx, err) { + // Retry the request + continue + } + return nil, err + } + + _, success := successStatus[res.StatusCode] + if success && !metadata.expect200OKWithError { + // We do not expect 2xx to return an error return. + return res, nil + } // in all other situations we must first parse the body as ErrorResponse + + // 5MiB is sufficiently large enough to hold any error or regular XML response. + var bodyBytes []byte + bodyBytes, err = io.ReadAll(io.LimitReader(res.Body, 5*humanize.MiByte)) + // By now, res.Body should be closed + closeResponse(res) + if err != nil { + return nil, err + } + + // Save the body. + bodySeeker := bytes.NewReader(bodyBytes) + res.Body = io.NopCloser(bodySeeker) + + apiErr := httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName) + + // Save the body back again. + bodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = io.NopCloser(bodySeeker) + + if apiErr == nil { + return res, nil + } + + // For errors verify if its retryable otherwise fail quickly. + errResponse := ToErrorResponse(apiErr) + err = errResponse + + // Bucket region if set in error response and the error + // code dictates invalid region, we can retry the request + // with the new region. + // + // Additionally, we should only retry if bucketLocation and custom + // region is empty. + if c.region == "" { + switch errResponse.Code { + case AuthorizationHeaderMalformed: + fallthrough + case InvalidRegion: + fallthrough + case AccessDenied: + if errResponse.Region == "" { + // Region is empty we simply return the error. + return res, err + } + // Region is not empty figure out a way to + // handle this appropriately. + if metadata.bucketName != "" { + // Gather Cached location only if bucketName is present. + if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } + } else { + // This is for ListBuckets() fallback. + if errResponse.Region != metadata.bucketLocation { + // Retry if the error response has a different region + // than the request we just made. + metadata.bucketLocation = errResponse.Region + continue // Retry + } + } + } + } + + // Verify if error response code is retryable. + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + // Verify if http status code is retryable. + if isHTTPStatusRetryable(res.StatusCode) { + continue // Retry. + } + + // For all other cases break out of the retry loop. + break + } + + // Return an error when retry is canceled or deadlined + if e := ctx.Err(); e != nil { + return nil, e + } + + return res, err +} + +// newRequest - instantiate a new HTTP request for a given method. +func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = http.MethodPost + } + + location := metadata.bucketLocation + if location == "" { + if metadata.bucketName != "" { + // Gather location only if bucketName is present. + location, err = c.getBucketLocation(ctx, metadata.bucketName) + if err != nil { + return nil, err + } + } + if location == "" { + location = getDefaultLocation(*c.endpointURL, c.region) + } + } + + // Look if target url supports virtual host. + // We explicitly disallow MakeBucket calls to not use virtual DNS style, + // since the resolution may fail. + isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket + + // Construct a new target URL. + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, + isVirtualHost, metadata.queryValues) + if err != nil { + return nil, err + } + + if c.httpTrace != nil { + ctx = httptrace.WithClientTrace(ctx, c.httpTrace) + } + + // make sure to de-dup calls to credential services, this reduces + // the overall load to the endpoint generating credential service. + value, err, _ := c.credsGroup.Do(metadata.bucketName, func() (credentials.Value, error) { + if s3utils.IsS3ExpressBucket(metadata.bucketName) && s3utils.IsAmazonEndpoint(*c.endpointURL) { + return c.CreateSession(ctx, metadata.bucketName, SessionReadWrite) + } + // Get credentials from the configured credentials provider. + return c.credsProvider.GetWithContext(c.CredContext()) + }) + if err != nil { + return nil, err + } + + // Initialize a new HTTP request for the method. + req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + if s3utils.IsS3ExpressBucket(metadata.bucketName) && sessionToken != "" { + req.Header.Set("x-amz-s3session-token", sessionToken) + } + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + // Generate presign url if needed, return right here. + if metadata.expires != 0 && metadata.presignURL { + if signerType.IsAnonymous() { + return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") + } + if metadata.extraPresignHeader != nil { + if signerType.IsV2() { + return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.") + } + for k, v := range metadata.extraPresignHeader { + req.Header.Set(k, v[0]) + } + } + if signerType.IsV2() { + // Presign URL with signature v2. + req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) + } else if signerType.IsV4() { + // Presign URL with signature v4. + req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) + } + return req, nil + } + + // Set 'User-Agent' header for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + if metadata.contentLength == 0 { + req.Body = nil + } else { + req.Body = io.NopCloser(metadata.contentBody) + } + + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} + } + + // set md5Sum for content protection. + if len(metadata.contentMD5Base64) > 0 { + req.Header.Set("Content-Md5", metadata.contentMD5Base64) + } + + // For anonymous requests just return. + if signerType.IsAnonymous() { + if len(metadata.trailer) > 0 { + req.Header.Set("X-Amz-Content-Sha256", unsignedPayloadTrailer) + return signer.UnsignedTrailer(*req, metadata.trailer), nil + } + + return req, nil + } + + switch { + case signerType.IsV2(): + // Add signature version '2' authorization header. + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + case metadata.streamSha256 && !c.secure: + if len(metadata.trailer) > 0 { + req.Trailer = metadata.trailer + } + // Streaming signature is used by default for a PUT object request. + // Additionally, we also look if the initialized client is secure, + // if yes then we don't need to perform streaming signature. + if s3utils.IsAmazonExpressRegionalEndpoint(*c.endpointURL) { + req = signer.StreamingSignV4Express(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher()) + } else { + req = signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher()) + } + default: + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if metadata.contentSHA256Hex != "" { + shaHeader = metadata.contentSHA256Hex + if len(metadata.trailer) > 0 { + // Sanity check, we should not end up here if upstream is sane. + return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") + } + } else if len(metadata.trailer) > 0 { + shaHeader = unsignedPayloadTrailer + } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + if s3utils.IsAmazonExpressRegionalEndpoint(*c.endpointURL) { + req = signer.SignV4TrailerExpress(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) + } else { + // Add signature version '4' authorization header. + req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) + } + } + + // Return request. + return req, nil +} + +// set User agent. +func (c *Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// makeTargetURL make a new target url. +func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { + host := c.endpointURL.Host + // For Amazon S3 endpoint, try to fetch location based endpoint. + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + if c.s3AccelerateEndpoint != "" && bucketName != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + // Disable transfer acceleration for non-compliant bucket names. + if strings.Contains(bucketName, ".") { + return nil, errTransferAccelerationBucket(bucketName) + } + // If transfer acceleration is requested set new host. + // For more details about enabling transfer acceleration read here. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + host = c.s3AccelerateEndpoint + } else { + // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint + if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) { + if s3utils.IsAmazonExpressRegionalEndpoint(*c.endpointURL) { + if bucketName == "" { + host = getS3ExpressEndpoint(bucketLocation, false) + } else { + // Fetch new host based on the bucket location. + host = getS3ExpressEndpoint(bucketLocation, s3utils.IsS3ExpressBucket(bucketName)) + } + } else { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation, c.s3DualstackEnabled) + } + } + } + } + + // Save scheme. + scheme := c.endpointURL.Scheme + + // Strip port 80 and 443 so we won't send these ports in Host header. + // The reason is that browsers and curl automatically remove :80 and :443 + // with the generated presigned urls, then a signature mismatch error. + if h, p, err := net.SplitHostPort(host); err == nil { + if scheme == "http" && p == "80" || scheme == "https" && p == "443" { + host = h + if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { + host = "[" + h + "]" + } + } + } + + urlStr := scheme + "://" + host + "/" + + // Make URL only if bucketName is available, otherwise use the + // endpoint URL. + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support + // virtual host style. + if isVirtualHostStyle { + urlStr = scheme + "://" + bucketName + "." + host + "/" + if objectName != "" { + urlStr += s3utils.EncodePath(objectName) + } + } else { + // If not fall back to using path style. + urlStr = urlStr + bucketName + "/" + if objectName != "" { + urlStr += s3utils.EncodePath(objectName) + } + } + } + + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) + } + + return url.Parse(urlStr) +} + +// returns true if virtual hosted style requests are to be used. +func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if c.lookupFn != nil { + lookup := c.lookupFn(url, bucketName) + switch lookup { + case BucketLookupDNS: + return true + case BucketLookupPath: + return false + } + // if its auto then we fallback to default detection. + return s3utils.IsVirtualHostSupported(url, bucketName) + } + + if bucketName == "" { + return false + } + + if c.lookup == BucketLookupDNS { + return true + } + + if c.lookup == BucketLookupPath { + return false + } + + // default to virtual only for Amazon/Google storage. In all other cases use + // path style requests + return s3utils.IsVirtualHostSupported(url, bucketName) +} + +// CredContext returns the context for fetching credentials +func (c *Client) CredContext() *credentials.CredContext { + httpClient := c.httpClient + if httpClient == nil { + httpClient = http.DefaultClient + } + return &credentials.CredContext{ + Client: httpClient, + Endpoint: c.endpointURL.String(), + } +} + +// GetCreds returns the access creds for the client +func (c *Client) GetCreds() (credentials.Value, error) { + if c.credsProvider == nil { + return credentials.Value{}, errors.New("no credentials provider") + } + return c.credsProvider.GetWithContext(c.CredContext()) +} diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go new file mode 100644 index 000000000000..a37a72ae8ec7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -0,0 +1,214 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net" + "net/http" + "net/url" + "path" + + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" +) + +// GetBucketLocation - get location for the bucket name from location cache, if not +// fetch freshly by making a new request. +func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + return c.getBucketLocation(ctx, bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache, if not +// fetch freshly by making a new request. +func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + + // Region set then no need to fetch bucket location. + if c.region != "" { + return c.region, nil + } + + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(ctx, bucketName) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + location, err := processBucketLocationResponse(resp, bucketName) + if err != nil { + return "", err + } + c.bucketLocCache.Set(bucketName, location) + return location, nil +} + +// processes the getBucketLocation http response from the server. +func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { + if resp != nil { + if resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + // For access denied error, it could be an anonymous + // request. Move forward and let the top level callers + // succeed if possible based on their policy. + switch errResp.Code { + case NotImplemented: + switch errResp.Server { + case "AmazonSnowball": + return "snowball", nil + case "cloudflare": + return "us-east-1", nil + } + case AuthorizationHeaderMalformed: + fallthrough + case InvalidRegion: + fallthrough + case AccessDenied: + if errResp.Region == "" { + return "us-east-1", nil + } + return errResp.Region, nil + } + return "", err + } + } + + // Extract location. + var locationConstraint string + err = xmlDecoder(resp.Body, &locationConstraint) + if err != nil { + return "", err + } + + location := locationConstraint + // Location is empty will be 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // Location can be 'EU' convert it to meaningful 'eu-west-1'. + if location == "EU" { + location = "eu-west-1" + } + + // Save the location into cache. + + // Return. + return location, nil +} + +// getBucketLocationRequest - Wrapper creates a new getBucketLocation request. +func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { + // Set location query. + urlValues := make(url.Values) + urlValues.Set("location", "") + + // Set get bucket location always as path style. + targetURL := *c.endpointURL + + // as it works in makeTargetURL method from api.go file + if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { + if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { + targetURL.Host = h + if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { + targetURL.Host = "[" + h + "]" + } + } + } + + isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName) + + var urlStr string + + if isVirtualStyle { + urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" + } else { + targetURL.Path = path.Join(bucketName, "") + "/" + targetURL.RawQuery = urlValues.Encode() + urlStr = targetURL.String() + } + + // Get a new HTTP request for the method. + req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil) + if err != nil { + return nil, err + } + + // Set UserAgent for the request. + c.setUserAgent(req) + + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.GetWithContext(c.CredContext()) + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + if signerType.IsAnonymous() { + return req, nil + } + + if signerType.IsV2() { + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle) + return req, nil + } + + // Set sha256 sum for signature calculation only with signature version '4'. + contentSha256 := emptySHA256Hex + if c.secure { + contentSha256 = unsignedPayload + } + + req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + return req, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go new file mode 100644 index 000000000000..0feb89bf82e3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/checksum.go @@ -0,0 +1,474 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2023 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/binary" + "errors" + "hash" + "io" + "math/bits" + "net/http" + "sort" + "strings" + + "github.com/klauspost/crc32" + "github.com/minio/crc64nvme" +) + +// ChecksumMode contains information about the checksum mode on the object +type ChecksumMode uint32 + +const ( + // ChecksumFullObjectMode Full object checksum `csumCombine(csum1, csum2...)...), csumN...)` + ChecksumFullObjectMode ChecksumMode = 1 << iota + + // ChecksumCompositeMode Composite checksum `csum([csum1 + csum2 ... + csumN])` + ChecksumCompositeMode + + // Keep after all valid checksums + checksumLastMode + + // checksumModeMask is a mask for valid checksum mode types. + checksumModeMask = checksumLastMode - 1 +) + +// Is returns if c is all of t. +func (c ChecksumMode) Is(t ChecksumMode) bool { + return c&t == t +} + +// Key returns the header key. +func (c ChecksumMode) Key() string { + return amzChecksumMode +} + +func (c ChecksumMode) String() string { + switch c & checksumModeMask { + case ChecksumFullObjectMode: + return "FULL_OBJECT" + case ChecksumCompositeMode: + return "COMPOSITE" + } + return "" +} + +// ChecksumType contains information about the checksum type. +type ChecksumType uint32 + +const ( + + // ChecksumSHA256 indicates a SHA256 checksum. + ChecksumSHA256 ChecksumType = 1 << iota + // ChecksumSHA1 indicates a SHA-1 checksum. + ChecksumSHA1 + // ChecksumCRC32 indicates a CRC32 checksum with IEEE table. + ChecksumCRC32 + // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table. + ChecksumCRC32C + // ChecksumCRC64NVME indicates CRC64 with 0xad93d23594c93659 polynomial. + ChecksumCRC64NVME + + // Keep after all valid checksums + checksumLast + + // ChecksumFullObject is a modifier that can be used on CRC32 and CRC32C + // to indicate full object checksums. + ChecksumFullObject + + // checksumMask is a mask for valid checksum types. + checksumMask = checksumLast - 1 + + // ChecksumNone indicates no checksum. + ChecksumNone ChecksumType = 0 + + // ChecksumFullObjectCRC32 indicates full object CRC32 + ChecksumFullObjectCRC32 = ChecksumCRC32 | ChecksumFullObject + + // ChecksumFullObjectCRC32C indicates full object CRC32C + ChecksumFullObjectCRC32C = ChecksumCRC32C | ChecksumFullObject + + amzChecksumAlgo = "x-amz-checksum-algorithm" + amzChecksumCRC32 = "x-amz-checksum-crc32" + amzChecksumCRC32C = "x-amz-checksum-crc32c" + amzChecksumSHA1 = "x-amz-checksum-sha1" + amzChecksumSHA256 = "x-amz-checksum-sha256" + amzChecksumCRC64NVME = "x-amz-checksum-crc64nvme" + amzChecksumMode = "x-amz-checksum-type" +) + +// Base returns the base type, without modifiers. +func (c ChecksumType) Base() ChecksumType { + return c & checksumMask +} + +// Is returns if c is all of t. +func (c ChecksumType) Is(t ChecksumType) bool { + return c&t == t +} + +// Key returns the header key. +// returns empty string if invalid or none. +func (c ChecksumType) Key() string { + switch c & checksumMask { + case ChecksumCRC32: + return amzChecksumCRC32 + case ChecksumCRC32C: + return amzChecksumCRC32C + case ChecksumSHA1: + return amzChecksumSHA1 + case ChecksumSHA256: + return amzChecksumSHA256 + case ChecksumCRC64NVME: + return amzChecksumCRC64NVME + } + return "" +} + +// CanComposite will return if the checksum type can be used for composite multipart upload on AWS. +func (c ChecksumType) CanComposite() bool { + switch c & checksumMask { + case ChecksumSHA256, ChecksumSHA1, ChecksumCRC32, ChecksumCRC32C: + return true + } + return false +} + +// CanMergeCRC will return if the checksum type can be used for multipart upload on AWS. +func (c ChecksumType) CanMergeCRC() bool { + switch c & checksumMask { + case ChecksumCRC32, ChecksumCRC32C, ChecksumCRC64NVME: + return true + } + return false +} + +// FullObjectRequested will return if the checksum type indicates full object checksum was requested. +func (c ChecksumType) FullObjectRequested() bool { + switch c & (ChecksumFullObject | checksumMask) { + case ChecksumFullObjectCRC32C, ChecksumFullObjectCRC32, ChecksumCRC64NVME: + return true + } + return false +} + +// KeyCapitalized returns the capitalized key as used in HTTP headers. +func (c ChecksumType) KeyCapitalized() string { + return http.CanonicalHeaderKey(c.Key()) +} + +// RawByteLen returns the size of the un-encoded checksum. +func (c ChecksumType) RawByteLen() int { + switch c & checksumMask { + case ChecksumCRC32, ChecksumCRC32C: + return 4 + case ChecksumSHA1: + return sha1.Size + case ChecksumSHA256: + return sha256.Size + case ChecksumCRC64NVME: + return crc64nvme.Size + } + return 0 +} + +const crc64NVMEPolynomial = 0xad93d23594c93659 + +// Hasher returns a hasher corresponding to the checksum type. +// Returns nil if no checksum. +func (c ChecksumType) Hasher() hash.Hash { + switch c & checksumMask { + case ChecksumCRC32: + return crc32.NewIEEE() + case ChecksumCRC32C: + return crc32.New(crc32.MakeTable(crc32.Castagnoli)) + case ChecksumSHA1: + return sha1.New() + case ChecksumSHA256: + return sha256.New() + case ChecksumCRC64NVME: + return crc64nvme.New() + } + return nil +} + +// IsSet returns whether the type is valid and known. +func (c ChecksumType) IsSet() bool { + return bits.OnesCount32(uint32(c&checksumMask)) == 1 +} + +// SetDefault will set the checksum if not already set. +func (c *ChecksumType) SetDefault(t ChecksumType) { + if !c.IsSet() { + *c = t + } +} + +// EncodeToString the encoded hash value of the content provided in b. +func (c ChecksumType) EncodeToString(b []byte) string { + if !c.IsSet() { + return "" + } + h := c.Hasher() + h.Write(b) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +// String returns the type as a string. +// CRC32, CRC32C, SHA1, and SHA256 for valid values. +// Empty string for unset and "" if not valid. +func (c ChecksumType) String() string { + switch c & checksumMask { + case ChecksumCRC32: + return "CRC32" + case ChecksumCRC32C: + return "CRC32C" + case ChecksumSHA1: + return "SHA1" + case ChecksumSHA256: + return "SHA256" + case ChecksumNone: + return "" + case ChecksumCRC64NVME: + return "CRC64NVME" + } + return "" +} + +// ChecksumReader reads all of r and returns a checksum of type c. +// Returns any error that may have occurred while reading. +func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) { + h := c.Hasher() + if h == nil { + return Checksum{}, nil + } + _, err := io.Copy(h, r) + if err != nil { + return Checksum{}, err + } + return NewChecksum(c, h.Sum(nil)), nil +} + +// ChecksumBytes returns a checksum of the content b with type c. +func (c ChecksumType) ChecksumBytes(b []byte) Checksum { + h := c.Hasher() + if h == nil { + return Checksum{} + } + n, err := h.Write(b) + if err != nil || n != len(b) { + // Shouldn't happen with these checksummers. + return Checksum{} + } + return NewChecksum(c, h.Sum(nil)) +} + +// Checksum is a type and encoded value. +type Checksum struct { + Type ChecksumType + r []byte +} + +// NewChecksum sets the checksum to the value of b, +// which is the raw hash output. +// If the length of c does not match t.RawByteLen, +// a checksum with ChecksumNone is returned. +func NewChecksum(t ChecksumType, b []byte) Checksum { + if t.IsSet() && len(b) == t.RawByteLen() { + return Checksum{Type: t, r: b} + } + return Checksum{} +} + +// NewChecksumString sets the checksum to the value of s, +// which is the base 64 encoded raw hash output. +// If the length of c does not match t.RawByteLen, it is not added. +func NewChecksumString(t ChecksumType, s string) Checksum { + b, _ := base64.StdEncoding.DecodeString(s) + if t.IsSet() && len(b) == t.RawByteLen() { + return Checksum{Type: t, r: b} + } + return Checksum{} +} + +// IsSet returns whether the checksum is valid and known. +func (c Checksum) IsSet() bool { + return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen() +} + +// Encoded returns the encoded value. +// Returns the empty string if not set or valid. +func (c Checksum) Encoded() string { + if !c.IsSet() { + return "" + } + return base64.StdEncoding.EncodeToString(c.r) +} + +// Raw returns the raw checksum value if set. +func (c Checksum) Raw() []byte { + if !c.IsSet() { + return nil + } + return c.r +} + +// CompositeChecksum returns the composite checksum of all provided parts. +func (c ChecksumType) CompositeChecksum(p []ObjectPart) (*Checksum, error) { + if !c.CanComposite() { + return nil, errors.New("cannot do composite checksum") + } + sort.Slice(p, func(i, j int) bool { + return p[i].PartNumber < p[j].PartNumber + }) + c = c.Base() + crcBytes := make([]byte, 0, len(p)*c.RawByteLen()) + for _, part := range p { + pCrc, err := part.ChecksumRaw(c) + if err != nil { + return nil, err + } + crcBytes = append(crcBytes, pCrc...) + } + h := c.Hasher() + h.Write(crcBytes) + return &Checksum{Type: c, r: h.Sum(nil)}, nil +} + +// FullObjectChecksum will return the full object checksum from provided parts. +func (c ChecksumType) FullObjectChecksum(p []ObjectPart) (*Checksum, error) { + if !c.CanMergeCRC() { + return nil, errors.New("cannot merge this checksum type") + } + c = c.Base() + sort.Slice(p, func(i, j int) bool { + return p[i].PartNumber < p[j].PartNumber + }) + + switch len(p) { + case 0: + return nil, errors.New("no parts given") + case 1: + check, err := p[0].ChecksumRaw(c) + if err != nil { + return nil, err + } + return &Checksum{ + Type: c, + r: check, + }, nil + } + var merged uint32 + var merged64 uint64 + first, err := p[0].ChecksumRaw(c) + if err != nil { + return nil, err + } + sz := p[0].Size + switch c { + case ChecksumCRC32, ChecksumCRC32C: + merged = binary.BigEndian.Uint32(first) + case ChecksumCRC64NVME: + merged64 = binary.BigEndian.Uint64(first) + } + + poly32 := uint32(crc32.IEEE) + if c.Is(ChecksumCRC32C) { + poly32 = crc32.Castagnoli + } + for _, part := range p[1:] { + if part.Size == 0 { + continue + } + sz += part.Size + pCrc, err := part.ChecksumRaw(c) + if err != nil { + return nil, err + } + switch c { + case ChecksumCRC32, ChecksumCRC32C: + merged = crc32Combine(poly32, merged, binary.BigEndian.Uint32(pCrc), part.Size) + case ChecksumCRC64NVME: + merged64 = crc64Combine(bits.Reverse64(crc64NVMEPolynomial), merged64, binary.BigEndian.Uint64(pCrc), part.Size) + } + } + var tmp [8]byte + switch c { + case ChecksumCRC32, ChecksumCRC32C: + binary.BigEndian.PutUint32(tmp[:], merged) + return &Checksum{ + Type: c, + r: tmp[:4], + }, nil + case ChecksumCRC64NVME: + binary.BigEndian.PutUint64(tmp[:], merged64) + return &Checksum{ + Type: c, + r: tmp[:8], + }, nil + default: + return nil, errors.New("unknown checksum type") + } +} + +func addAutoChecksumHeaders(opts *PutObjectOptions) { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + + addChecksum := true + for k := range opts.UserMetadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { + addChecksum = false + } + } + + if addChecksum && opts.AutoChecksum.IsSet() { + opts.UserMetadata[amzChecksumAlgo] = opts.AutoChecksum.String() + if opts.AutoChecksum.FullObjectRequested() { + opts.UserMetadata[amzChecksumMode] = ChecksumFullObjectMode.String() + } + } +} + +func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) { + if !opts.AutoChecksum.IsSet() { + return + } + if opts.AutoChecksum.CanComposite() && !opts.AutoChecksum.Is(ChecksumFullObject) { + // Add composite hash of hashes. + crc, err := opts.AutoChecksum.CompositeChecksum(allParts) + if err == nil { + opts.UserMetadata = map[string]string{ + opts.AutoChecksum.Key(): crc.Encoded(), + amzChecksumMode: ChecksumCompositeMode.String(), + } + } + } else if opts.AutoChecksum.CanMergeCRC() { + crc, err := opts.AutoChecksum.FullObjectChecksum(allParts) + if err == nil { + opts.UserMetadata = map[string]string{ + opts.AutoChecksum.Key(): crc.Encoded(), + amzChecksumMode: ChecksumFullObjectMode.String(), + } + } + } +} diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md new file mode 100644 index 000000000000..7dcdbfc3e0ec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md @@ -0,0 +1,52 @@ +Contributor Covenant Code of Conduct +==================================== + +Our Pledge +---------- + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +Our Standards +------------- + +Examples of behavior that contributes to creating a positive environment include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +Our Responsibilities +-------------------- + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior, in compliance with the licensing terms applying to the Project developments. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. However, these actions shall respect the licensing terms of the Project Developments that will always supersede such Code of Conduct. + +Scope +----- + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +Enforcement +----------- + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at dev@min.io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +Attribution +----------- + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.4, available at [http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4/) + +This version includes a clarification to ensure that the code of conduct is in compliance with the free software licensing terms of the project. diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go new file mode 100644 index 000000000000..4099a37f9a54 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -0,0 +1,130 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// Multipart upload defaults. + +// absMinPartSize - absolute minimum part size (5 MiB) below which +// a part in a multipart upload may not be uploaded. +const absMinPartSize = 1024 * 1024 * 5 + +// minPartSize - minimum part size 16MiB per object after which +// putObject behaves internally as multipart. +const minPartSize = 1024 * 1024 * 16 + +// maxPartsCount - maximum number of parts for a single multipart session. +const maxPartsCount = 10000 + +// maxPartSize - maximum part size 5GiB for a single multipart upload +// operation. +const maxPartSize = 1024 * 1024 * 1024 * 5 + +// maxSinglePutObjectSize - maximum size 5GiB of object per PUT +// operation. +const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 + +// maxMultipartPutObjectSize - maximum size 5TiB of object for +// Multipart operation. +const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload, but have a trailer. +const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + +// Total number of parallel workers used for multipart operation. +const totalWorkers = 4 + +// Signature related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" +) + +const ( + // GetObjectAttributesTags are tags used to defined + // return values for the GetObjectAttributes API + GetObjectAttributesTags = "ETag,Checksum,StorageClass,ObjectSize,ObjectParts" + // GetObjectAttributesMaxParts defined the default maximum + // number of parts returned by GetObjectAttributes + GetObjectAttributesMaxParts = 1000 +) + +const ( + // Response Headers + + // ETag is a common response header + ETag = "ETag" + + // Storage class header. + amzStorageClass = "X-Amz-Storage-Class" + + // Website redirect location header + amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" + + // GetObjectAttributes headers + amzPartNumberMarker = "X-Amz-Part-Number-Marker" + amzExpectedBucketOnwer = "X-Amz-Expected-Bucket-Owner" + amzMaxParts = "X-Amz-Max-Parts" + amzObjectAttributes = "X-Amz-Object-Attributes" + + // Object Tagging headers + amzTaggingHeader = "X-Amz-Tagging" + amzTaggingHeaderDirective = "X-Amz-Tagging-Directive" + + amzVersionID = "X-Amz-Version-Id" + amzTaggingCount = "X-Amz-Tagging-Count" + amzExpiration = "X-Amz-Expiration" + amzRestore = "X-Amz-Restore" + amzReplicationStatus = "X-Amz-Replication-Status" + amzDeleteMarker = "X-Amz-Delete-Marker" + + // Object legal hold header + amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold" + + // Object retention header + amzLockMode = "X-Amz-Object-Lock-Mode" + amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date" + amzBypassGovernance = "X-Amz-Bypass-Governance-Retention" + + // Replication status + amzBucketReplicationStatus = "X-Amz-Replication-Status" + // Minio specific Replication/lifecycle transition extension + minIOBucketSourceMTime = "X-Minio-Source-Mtime" + + minIOBucketSourceETag = "X-Minio-Source-Etag" + minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker" + minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request" + minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request" + minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check" + + // Header indicates last tag update time on source + minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp" + // Header indicates last retention update time on source + minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" + // Header indicates last legalhold update time on source + minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" + minIOForceDelete = "x-minio-force-delete" + // Header indicates delete marker replication request can be sent by source now. + minioTgtReplicationReady = "X-Minio-Replication-Ready" + // Header asks if delete marker replication request can be sent by source now. + isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" +) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go new file mode 100644 index 000000000000..99b99db9b870 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -0,0 +1,151 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "net/http" + + "github.com/minio/minio-go/v7/pkg/encrypt" +) + +// Core - Inherits Client and adds new methods to expose the low level S3 APIs. +type Core struct { + *Client +} + +// NewCore - Returns new initialized a Core client, this CoreClient should be +// only used under special conditions such as need to access lower primitives +// and being able to use them to write your own wrappers. +func NewCore(endpoint string, opts *Options) (*Core, error) { + var s3Client Core + client, err := New(endpoint, opts) + if err != nil { + return nil, err + } + s3Client.Client = client + return &s3Client, nil +} + +// ListObjects - List all the objects at a prefix, optionally with marker and delimiter +// you can further filter the results. +func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { + return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil) +} + +// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses +// continuationToken instead of marker to support iteration over the results. +func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) { + return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil) +} + +// CopyObject - copies an object from source object to destination object on server side. +func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { + return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts) +} + +// CopyObjectPart - creates a part in a multipart upload by copying (a +// part of) an existing object. +func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, + partID int, startOffset, length int64, metadata map[string]string, +) (p CompletePart, err error) { + return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, + partID, startOffset, length, metadata) +} + +// PutObject - Upload object. Uploads using single PUT call. +func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) { + hookReader := newHook(data, opts.Progress) + return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts) +} + +// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. +func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) { + result, err := c.initiateMultipartUpload(ctx, bucket, object, opts) + return result.UploadID, err +} + +// ListMultipartUploads - List incomplete uploads. +func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { + return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) +} + +// PutObjectPartOptions contains options for PutObjectPart API +type PutObjectPartOptions struct { + Md5Base64, Sha256Hex string + SSE encrypt.ServerSide + CustomHeader, Trailer http.Header + DisableContentSha256 bool +} + +// PutObjectPart - Upload an object part. +func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, + data io.Reader, size int64, opts PutObjectPartOptions, +) (ObjectPart, error) { + p := uploadPartParams{ + bucketName: bucket, + objectName: object, + uploadID: uploadID, + reader: data, + partNumber: partID, + md5Base64: opts.Md5Base64, + sha256Hex: opts.Sha256Hex, + size: size, + sse: opts.SSE, + streamSha256: !opts.DisableContentSha256, + customHeader: opts.CustomHeader, + trailer: opts.Trailer, + } + return c.uploadPart(ctx, p) +} + +// ListObjectParts - List uploaded parts of an incomplete upload.x +func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) { + return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts) +} + +// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. +func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) { + res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ + Parts: parts, + }, opts) + return res, err +} + +// AbortMultipartUpload - Abort an incomplete upload. +func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { + return c.abortMultipartUpload(ctx, bucket, object, uploadID) +} + +// GetBucketPolicy - fetches bucket access policy for a given bucket. +func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) { + return c.getBucketPolicy(ctx, bucket) +} + +// PutBucketPolicy - applies a new bucket access policy for a given bucket. +func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error { + return c.putBucketPolicy(ctx, bucket, bucketPolicy) +} + +// GetObject is a lower level API implemented to support reading +// partial objects and also downloading objects with special conditions +// matching etag, modtime etc. +func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { + return c.getObject(ctx, bucketName, objectName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/create-session.go b/vendor/github.com/minio/minio-go/v7/create-session.go new file mode 100644 index 000000000000..47c286564e78 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/create-session.go @@ -0,0 +1,182 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/xml" + "errors" + "net" + "net/http" + "net/url" + "path" + "time" + + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" +) + +// SessionMode - session mode type there are only two types +type SessionMode string + +// Session constants +const ( + SessionReadWrite SessionMode = "ReadWrite" + SessionReadOnly SessionMode = "ReadOnly" +) + +type createSessionResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateSessionResult"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + } `xml:",omitempty"` +} + +// CreateSession - https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// the returning credentials may be cached depending on the expiration of the original +// credential, credentials will get renewed 10 secs earlier than when its gonna expire +// allowing for some leeway in the renewal process. +func (c *Client) CreateSession(ctx context.Context, bucketName string, sessionMode SessionMode) (cred credentials.Value, err error) { + if err := s3utils.CheckValidBucketNameS3Express(bucketName); err != nil { + return credentials.Value{}, err + } + + v, ok := c.bucketSessionCache.Get(bucketName) + if ok && v.Expiration.After(time.Now().Add(10*time.Second)) { + // Verify if the credentials will not expire + // in another 10 seconds, if not we renew it again. + return v, nil + } + + req, err := c.createSessionRequest(ctx, bucketName, sessionMode) + if err != nil { + return credentials.Value{}, err + } + + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return credentials.Value{}, err + } + + if resp.StatusCode != http.StatusOK { + return credentials.Value{}, httpRespToErrorResponse(resp, bucketName, "") + } + + credSession := &createSessionResult{} + dec := xml.NewDecoder(resp.Body) + if err = dec.Decode(credSession); err != nil { + return credentials.Value{}, err + } + + defer c.bucketSessionCache.Set(bucketName, cred) + + return credentials.Value{ + AccessKeyID: credSession.Credentials.AccessKey, + SecretAccessKey: credSession.Credentials.SecretKey, + SessionToken: credSession.Credentials.SessionToken, + Expiration: credSession.Credentials.Expiration, + }, nil +} + +// createSessionRequest - Wrapper creates a new CreateSession request. +func (c *Client) createSessionRequest(ctx context.Context, bucketName string, sessionMode SessionMode) (*http.Request, error) { + // Set location query. + urlValues := make(url.Values) + urlValues.Set("session", "") + + // Set get bucket location always as path style. + targetURL := *c.endpointURL + + // Fetch new host based on the bucket location. + host := getS3ExpressEndpoint(c.region, s3utils.IsS3ExpressBucket(bucketName)) + + // as it works in makeTargetURL method from api.go file + if h, p, err := net.SplitHostPort(host); err == nil { + if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { + host = h + if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { + host = "[" + h + "]" + } + } + } + + isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName) + + var urlStr string + + if isVirtualStyle { + urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + host + "/?session" + } else { + targetURL.Path = path.Join(bucketName, "") + "/" + targetURL.RawQuery = urlValues.Encode() + urlStr = targetURL.String() + } + + // Get a new HTTP request for the method. + req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil) + if err != nil { + return nil, err + } + + // Set UserAgent for the request. + c.setUserAgent(req) + + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.GetWithContext(c.CredContext()) + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + if signerType.IsAnonymous() || signerType.IsV2() { + return req, errors.New("Only signature v4 is supported for CreateSession() API") + } + + // Set sha256 sum for signature calculation only with signature version '4'. + contentSha256 := emptySHA256Hex + if c.secure { + contentSha256 = unsignedPayload + } + + req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req.Header.Set("x-amz-create-session-mode", string(sessionMode)) + req = signer.SignV4Express(*req, accessKeyID, secretAccessKey, sessionToken, c.region) + return req, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/endpoints.go b/vendor/github.com/minio/minio-go/v7/endpoints.go new file mode 100644 index 000000000000..34b340b39126 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/endpoints.go @@ -0,0 +1,276 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +type awsS3Endpoint struct { + endpoint string + dualstackEndpoint string +} + +type awsS3ExpressEndpoint struct { + regionalEndpoint string + zonalEndpoints []string +} + +var awsS3ExpressEndpointMap = map[string]awsS3ExpressEndpoint{ + "us-east-1": { + "s3express-control.us-east-1.amazonaws.com", + []string{ + "s3express-use1-az4.us-east-1.amazonaws.com", + "s3express-use1-az5.us-east-1.amazonaws.com", + "3express-use1-az6.us-east-1.amazonaws.com", + }, + }, + "us-east-2": { + "s3express-control.us-east-2.amazonaws.com", + []string{ + "s3express-use2-az1.us-east-2.amazonaws.com", + "s3express-use2-az2.us-east-2.amazonaws.com", + }, + }, + "us-west-2": { + "s3express-control.us-west-2.amazonaws.com", + []string{ + "s3express-usw2-az1.us-west-2.amazonaws.com", + "s3express-usw2-az3.us-west-2.amazonaws.com", + "s3express-usw2-az4.us-west-2.amazonaws.com", + }, + }, + "ap-south-1": { + "s3express-control.ap-south-1.amazonaws.com", + []string{ + "s3express-aps1-az1.ap-south-1.amazonaws.com", + "s3express-aps1-az3.ap-south-1.amazonaws.com", + }, + }, + "ap-northeast-1": { + "s3express-control.ap-northeast-1.amazonaws.com", + []string{ + "s3express-apne1-az1.ap-northeast-1.amazonaws.com", + "s3express-apne1-az4.ap-northeast-1.amazonaws.com", + }, + }, + "eu-west-1": { + "s3express-control.eu-west-1.amazonaws.com", + []string{ + "s3express-euw1-az1.eu-west-1.amazonaws.com", + "s3express-euw1-az3.eu-west-1.amazonaws.com", + }, + }, + "eu-north-1": { + "s3express-control.eu-north-1.amazonaws.com", + []string{ + "s3express-eun1-az1.eu-north-1.amazonaws.com", + "s3express-eun1-az2.eu-north-1.amazonaws.com", + "s3express-eun1-az3.eu-north-1.amazonaws.com", + }, + }, +} + +// awsS3EndpointMap Amazon S3 endpoint map. +var awsS3EndpointMap = map[string]awsS3Endpoint{ + "us-east-1": { + "s3.us-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + }, + "us-east-2": { + "s3.us-east-2.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + }, + "us-iso-east-1": { + "s3.us-iso-east-1.c2s.ic.gov", + "s3.dualstack.us-iso-east-1.c2s.ic.gov", + }, + "us-isob-east-1": { + "s3.us-isob-east-1.sc2s.sgov.gov", + "s3.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, + "us-iso-west-1": { + "s3.us-iso-west-1.c2s.ic.gov", + "s3.dualstack.us-iso-west-1.c2s.ic.gov", + }, + "us-west-2": { + "s3.us-west-2.amazonaws.com", + "s3.dualstack.us-west-2.amazonaws.com", + }, + "us-west-1": { + "s3.us-west-1.amazonaws.com", + "s3.dualstack.us-west-1.amazonaws.com", + }, + "ca-central-1": { + "s3.ca-central-1.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + }, + "ca-west-1": { + "s3.ca-west-1.amazonaws.com", + "s3.dualstack.ca-west-1.amazonaws.com", + }, + "eu-west-1": { + "s3.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + }, + "eu-west-2": { + "s3.eu-west-2.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + }, + "eu-west-3": { + "s3.eu-west-3.amazonaws.com", + "s3.dualstack.eu-west-3.amazonaws.com", + }, + "eu-central-1": { + "s3.eu-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + }, + "eu-central-2": { + "s3.eu-central-2.amazonaws.com", + "s3.dualstack.eu-central-2.amazonaws.com", + }, + "eu-north-1": { + "s3.eu-north-1.amazonaws.com", + "s3.dualstack.eu-north-1.amazonaws.com", + }, + "eu-south-1": { + "s3.eu-south-1.amazonaws.com", + "s3.dualstack.eu-south-1.amazonaws.com", + }, + "eu-south-2": { + "s3.eu-south-2.amazonaws.com", + "s3.dualstack.eu-south-2.amazonaws.com", + }, + "ap-east-1": { + "s3.ap-east-1.amazonaws.com", + "s3.dualstack.ap-east-1.amazonaws.com", + }, + "ap-south-1": { + "s3.ap-south-1.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + }, + "ap-south-2": { + "s3.ap-south-2.amazonaws.com", + "s3.dualstack.ap-south-2.amazonaws.com", + }, + "ap-southeast-1": { + "s3.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + }, + "ap-southeast-2": { + "s3.ap-southeast-2.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + }, + "ap-southeast-3": { + "s3.ap-southeast-3.amazonaws.com", + "s3.dualstack.ap-southeast-3.amazonaws.com", + }, + "ap-southeast-4": { + "s3.ap-southeast-4.amazonaws.com", + "s3.dualstack.ap-southeast-4.amazonaws.com", + }, + "ap-northeast-1": { + "s3.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": { + "s3.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + "ap-northeast-3": { + "s3.ap-northeast-3.amazonaws.com", + "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + "af-south-1": { + "s3.af-south-1.amazonaws.com", + "s3.dualstack.af-south-1.amazonaws.com", + }, + "me-central-1": { + "s3.me-central-1.amazonaws.com", + "s3.dualstack.me-central-1.amazonaws.com", + }, + "me-south-1": { + "s3.me-south-1.amazonaws.com", + "s3.dualstack.me-south-1.amazonaws.com", + }, + "sa-east-1": { + "s3.sa-east-1.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + }, + "us-gov-west-1": { + "s3.us-gov-west-1.amazonaws.com", + "s3.dualstack.us-gov-west-1.amazonaws.com", + }, + "us-gov-east-1": { + "s3.us-gov-east-1.amazonaws.com", + "s3.dualstack.us-gov-east-1.amazonaws.com", + }, + "cn-north-1": { + "s3.cn-north-1.amazonaws.com.cn", + "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + "cn-northwest-1": { + "s3.cn-northwest-1.amazonaws.com.cn", + "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + "il-central-1": { + "s3.il-central-1.amazonaws.com", + "s3.dualstack.il-central-1.amazonaws.com", + }, + "ap-southeast-5": { + "s3.ap-southeast-5.amazonaws.com", + "s3.dualstack.ap-southeast-5.amazonaws.com", + }, + "ap-southeast-7": { + "s3.ap-southeast-7.amazonaws.com", + "s3.dualstack.ap-southeast-7.amazonaws.com", + }, + "mx-central-1": { + "s3.mx-central-1.amazonaws.com", + "s3.dualstack.mx-central-1.amazonaws.com", + }, + "ap-east-2": { + "s3.ap-east-2.amazonaws.com", + "s3.dualstack.ap-east-2.amazonaws.com", + }, +} + +// getS3ExpressEndpoint get Amazon S3 Express endpoing based on the region +// optionally if zonal is set returns first zonal endpoint. +func getS3ExpressEndpoint(region string, zonal bool) (endpoint string) { + s3ExpEndpoint, ok := awsS3ExpressEndpointMap[region] + if !ok { + return "" + } + if zonal { + return s3ExpEndpoint.zonalEndpoints[0] + } + return s3ExpEndpoint.regionalEndpoint +} + +// getS3Endpoint get Amazon S3 endpoint based on the bucket location. +func getS3Endpoint(bucketLocation string, useDualstack bool) (endpoint string) { + s3Endpoint, ok := awsS3EndpointMap[bucketLocation] + if !ok { + // Default to 's3.us-east-1.amazonaws.com' endpoint. + if useDualstack { + return "s3.dualstack.us-east-1.amazonaws.com" + } + return "s3.us-east-1.amazonaws.com" + } + if useDualstack { + return s3Endpoint.dualstackEndpoint + } + return s3Endpoint.endpoint +} diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go new file mode 100644 index 000000000000..4f8f9dd8cc79 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -0,0 +1,14791 @@ +//go:build mint +// +build mint + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "archive/zip" + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "iter" + "log/slog" + "math/rand" + "mime/multipart" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/dustin/go-humanize" + "github.com/google/uuid" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/cors" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/tags" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<= len(buf) { + err = nil + } else if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err +} + +func baseLogger(testName, function string, args map[string]interface{}, startTime time.Time) *slog.Logger { + // calculate the test case duration + duration := time.Since(startTime) + // log with the fields as per mint + l := slog.With( + "name", "minio-go: "+testName, + "duration", duration.Nanoseconds()/1000000, + ) + if function != "" { + l = l.With("function", function) + } + if len(args) > 0 { + l = l.With("args", args) + } + return l +} + +// log successful test runs +func logSuccess(testName, function string, args map[string]interface{}, startTime time.Time) { + baseLogger(testName, function, args, startTime). + With("status", "PASS"). + Info("") +} + +// As few of the features are not available in Gateway(s) currently, Check if err value is NotImplemented, +// and log as NA in that case and continue execution. Otherwise log as failure and return +func logError(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { + // If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests + // Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in + // addition to NotImplemented error returned from server + if isErrNotImplemented(err) { + logIgnored(testName, function, args, startTime, message) + } else { + logFailure(testName, function, args, startTime, alert, message, err) + if !isRunOnFail() { + panic(fmt.Sprintf("Test failed with message: %s, err: %v", message, err)) + } + } +} + +// Log failed test runs, do not call this directly, use logError instead, as that correctly stops the test run +func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { + l := baseLogger(testName, function, args, startTime).With( + "status", "FAIL", + "alert", alert, + "message", message, + ) + + if err != nil { + l = l.With("error", err) + } + + l.Error("") +} + +// log not applicable test runs +func logIgnored(testName, function string, args map[string]interface{}, startTime time.Time, alert string) { + baseLogger(testName, function, args, startTime). + With( + "status", "NA", + "alert", strings.Split(alert, " ")[0]+" is NotImplemented", + ).Info("") +} + +// Delete objects in given bucket, recursively +func cleanupBucket(bucketName string, c *minio.Client) error { + // Create a done channel to control 'ListObjectsV2' go routine. + doneCh := make(chan struct{}) + // Exit cleanly upon return. + defer close(doneCh) + // Iterate over all objects in the bucket via listObjectsV2 and delete + for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { + if objCh.Err != nil { + return objCh.Err + } + if objCh.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + return c.RemoveBucket(context.Background(), bucketName) +} + +func cleanupVersionedBucket(bucketName string, c *minio.Client) error { + doneCh := make(chan struct{}) + defer close(doneCh) + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + if obj.Err != nil { + return obj.Err + } + if obj.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, obj.Key, + minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + slog.Info("found object", "key", obj.Key, "version", obj.VersionID) + } + } + return err +} + +func isErrNotImplemented(err error) bool { + return minio.ToErrorResponse(err).Code == minio.NotImplemented +} + +func isRunOnFail() bool { + return os.Getenv("RUN_ON_FAIL") == "1" +} + +func init() { + // If server endpoint is not set, all tests default to + // using https://play.min.io + if os.Getenv(serverEndpoint) == "" { + os.Setenv(serverEndpoint, "play.min.io") + os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") + os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") + os.Setenv(enableHTTPS, "1") + } +} + +var mintDataDir = os.Getenv("MINT_DATA_DIR") + +func getMintDataDirFilePath(filename string) (fp string) { + if mintDataDir == "" { + return fp + } + return filepath.Join(mintDataDir, filename) +} + +func newRandomReader(seed, size int64) io.Reader { + return io.LimitReader(rand.New(rand.NewSource(seed)), size) +} + +func mustCrcReader(r io.Reader) uint32 { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + return crc.Sum32() +} + +func crcMatches(r io.Reader, want uint32) error { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +func crcMatchesName(r io.Reader, name string) error { + want := dataFileCRC32[name] + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +// read data from file if it exists or optionally create a buffer of particular size +func getDataReader(fileName string) io.ReadCloser { + if mintDataDir == "" { + size := int64(dataFileMap[fileName]) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) + } + return io.NopCloser(newRandomReader(size, size)) + } + reader, _ := os.Open(getMintDataDirFilePath(fileName)) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(reader) + reader.Close() + reader, _ = os.Open(getMintDataDirFilePath(fileName)) + } + return reader +} + +// randString generates random names and prepends them with a known prefix. +func randString(n int, src rand.Source, prefix string) string { + b := make([]byte, n) + // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! + for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +var dataFileMap = map[string]int{ + "datafile-0-b": 0, + "datafile-1-b": 1, + "datafile-1-kB": 1 * humanize.KiByte, + "datafile-10-kB": 10 * humanize.KiByte, + "datafile-33-kB": 33 * humanize.KiByte, + "datafile-100-kB": 100 * humanize.KiByte, + "datafile-1.03-MB": 1056 * humanize.KiByte, + "datafile-1-MB": 1 * humanize.MiByte, + "datafile-5-MB": 5 * humanize.MiByte, + "datafile-6-MB": 6 * humanize.MiByte, + "datafile-11-MB": 11 * humanize.MiByte, + "datafile-65-MB": 65 * humanize.MiByte, + "datafile-129-MB": 129 * humanize.MiByte, +} + +var dataFileCRC32 = map[string]uint32{} + +func isFullMode() bool { + return os.Getenv("MINT_MODE") == "full" +} + +func getFuncName() string { + return getFuncNameLoc(2) +} + +func getFuncNameLoc(caller int) string { + pc, _, _, _ := runtime.Caller(caller) + return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") +} + +type ClientConfig struct { + // MinIO client configuration + TraceOn bool // Turn on tracing of HTTP requests and responses to stderr + CredsV2 bool // Use V2 credentials if true, otherwise use v4 + TrailingHeaders bool // Send trailing headers in requests +} + +func NewClient(config ClientConfig) (*minio.Client, error) { + // Instantiate new MinIO client + var creds *credentials.Credentials + if config.CredsV2 { + creds = credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), "") + } else { + creds = credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), "") + } + opts := &minio.Options{ + Creds: creds, + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: config.TrailingHeaders, + } + client, err := minio.New(os.Getenv(serverEndpoint), opts) + if err != nil { + return nil, err + } + + if config.TraceOn { + client.TraceOn(os.Stderr) + } + + // Set user agent. + client.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + return client, nil +} + +// Tests bucket re-create errors. +func testMakeBucketError() { + region := "eu-central-1" + + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket Failed", err) + return + } + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "Bucket already exists", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists && + minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testMetadataSizeLimit() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) + return + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests various bucket supported formats. +func testMakeBucketRegions() { + region := "eu-central-1" + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + region = "us-west-2" + args["region"] = region + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + logSuccess(testName, function, args, startTime) +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectReadAt() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "objectContentType", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object content type + objectContentType := "binary/octet-stream" + args["objectContentType"] = objectContentType + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Get Object failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat Object failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return + } + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content types don't match", err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testListObjectVersions() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjectVersions(bucketName, prefix, recursive)" + args := map[string]interface{}{ + "bucketName": "", + "prefix": "", + "recursive": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected object deletion", err) + return + } + + var deleteMarkers, versions int + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if info.Key != objectName { + logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) + return + } + if info.IsDeleteMarker { + deleteMarkers++ + if !info.IsLatest { + logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) + return + } + } else { + versions++ + } + } + + if deleteMarkers != 1 { + logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) + return + } + + if versions != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testStatObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "StatObject" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + for i := 0; i < len(results); i++ { + opts := minio.StatObjectOptions{VersionID: results[i].VersionID} + statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during HEAD object", err) + return + } + if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Save the contents of datafiles to check with GetObject() reader output later + var buffers [][]byte + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + buffers = append(buffers, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.SliceStable(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testPutObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + const n = 10 + // Read input... + + // Save the data concurrently. + var wg sync.WaitGroup + wg.Add(n) + buffers := make([][]byte, n) + var errs [n]error + for i := 0; i < n; i++ { + r := newRandomReader(int64((1<<20)*i+i), int64(i)) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + buffers[i] = buf + + go func(i int) { + defer wg.Done() + _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err) + return + } + results = append(results, info) + } + + if len(results) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.Slice(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.Slice(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testListMultipartUpload() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + core := minio.Core{Client: c} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + ctx := context.Background() + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + defer func() { + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + } + }() + objName := "prefix/objectName" + + want := minio.ListMultipartUploadsResult{ + Bucket: bucketName, + KeyMarker: "", + UploadIDMarker: "", + NextKeyMarker: "", + NextUploadIDMarker: "", + EncodingType: "url", + MaxUploads: 1000, + IsTruncated: false, + Prefix: "prefix/objectName", + Delimiter: "/", + CommonPrefixes: nil, + } + for i := 0; i < 5; i++ { + uid, err := core.NewMultipartUpload(ctx, bucketName, objName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload failed", err) + return + } + want.Uploads = append(want.Uploads, minio.ObjectMultipartInfo{ + Initiated: time.Time{}, + StorageClass: "", + Key: objName, + Size: 0, + UploadID: uid, + Err: nil, + }) + + for j := 0; j < 5; j++ { + cmpGot := func(call string, got minio.ListMultipartUploadsResult) bool { + for i := range got.Uploads { + got.Uploads[i].Initiated = time.Time{} + } + if !reflect.DeepEqual(want, got) { + err := fmt.Errorf("want: %#v\ngot : %#v", want, got) + logError(testName, function, args, startTime, "", call+" failed", err) + } + return true + } + got, err := core.ListMultipartUploads(ctx, bucketName, objName, "", "", "/", 1000) + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-prefix", got) { + return + } + got, err = core.ListMultipartUploads(ctx, bucketName, objName, objName, "", "/", 1000) + got.KeyMarker = "" + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-marker", got) { + return + } + } + if i > 2 { + err = core.AbortMultipartUpload(ctx, bucketName, objName, uid) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + want.Uploads = want.Uploads[:len(want.Uploads)-1] + } + } + for _, up := range want.Uploads { + err = core.AbortMultipartUpload(ctx, bucketName, objName, up.UploadID) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + } + logSuccess(testName, function, args, startTime) +} + +func testCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := io.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testConcurrentCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy concurrently + const n = 10 + var wg sync.WaitGroup + wg.Add(n) + var errs [n]error + for i := 0; i < n; i++ { + go func(i int) { + defer wg.Done() + _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) + infos = []minio.ObjectInfo{} + for info := range objectsInfo { + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := io.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + infos = append(infos, info) + } + + if len(infos) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testComposeObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + testFiles := []string{"datafile-5-MB", "datafile-10-kB"} + var testFilesBytes [][]byte + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + testFilesBytes = append(testFilesBytes, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size > results[j].Size + }) + + // Source objects to concatenate. We also specify decryption + // key for each + src1 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[0].VersionID, + } + + src2 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[1].VersionID, + } + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + + _, err = c.ComposeObject(context.Background(), dst, src1, src2) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err) + return + } + defer readerCopy.Close() + + copyContentBytes, err := io.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) + return + } + + var expectedContent []byte + for _, fileBytes := range testFilesBytes { + expectedContent = append(expectedContent, fileBytes...) + } + + if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testRemoveObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObject()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var version minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + version = info + break + } + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) + return + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for range objectsInfo { + logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) + return + } + // test delete marker version id is non-null + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // create delete marker + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) + return + } + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + idx := 0 + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if idx == 0 { + if !info.IsDeleteMarker { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err) + return + } + } + idx++ + } + + defer cleanupBucket(bucketName, c) + + logSuccess(testName, function, args, startTime) +} + +func testRemoveObjectsWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObjects()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objectsVersions := make(chan minio.ObjectInfo) + go func() { + objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, + minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsVersionsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + objectsVersions <- info + } + close(objectsVersions) + }() + + removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObjects call failed", err) + return + } + + for e := range removeErrors { + if e.Err != nil { + logError(testName, function, args, startTime, "", "Single delete operation failed", err) + return + } + } + + objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for range objectsVersionsInfo { + logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) + return + } + + err = c.RemoveBucket(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testObjectTaggingWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "{Get,Set,Remove}ObjectTagging()" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + for _, file := range []string{"datafile-1-b", "datafile-10-kB"} { + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var versions []minio.ObjectInfo + for info := range versionsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + versions = append(versions, info) + } + + sort.SliceStable(versions, func(i, j int) bool { + return versions[i].Size < versions[j].Size + }) + + tagsV1 := map[string]string{"key1": "val1"} + t1, err := tags.MapToObjectTags(tagsV1) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + tagsV2 := map[string]string{"key2": "val2"} + t2, err := tags.MapToObjectTags(tagsV2) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + tagsEqual := func(tags1, tags2 map[string]string) bool { + for k1, v1 := range tags1 { + v2, found := tags2[k1] + if found { + if v1 != v2 { + return false + } + } + } + return true + } + + gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err) + return + } + + gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTaggingContext failed", err) + return + } + + if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName, + minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if len(emptyTags.ToMap()) != 0 { + logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with custom checksums. +func testPutObjectWithChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + cs minio.ChecksumType + }{ + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, + {cs: minio.ChecksumCRC64NVME}, + } + + for _, test := range tests { + if os.Getenv("MINT_NO_FULL_OBJECT") != "" && test.cs.FullObjectRequested() { + continue + } + bufSize := dataFileMap["datafile-10-kB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-10-kB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + h := test.cs.Hasher() + h.Reset() + + if test.cs.IsSet() { + meta["x-amz-checksum-algorithm"] = test.cs.String() + } + + // Test with a bad CRC - we haven't called h.Write(b), so this is a checksum of empty data + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + args["metadata"] = meta + args["range"] = "false" + args["checksum"] = test.cs.String() + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: meta, + }) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject did not fail on wrong CRC", err) + return + } + + // Set correct CRC. + h.Write(b) + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + reader.Close() + + resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + DisableContentSha256: true, + UserMetadata: meta, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(resp.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) + if resp.ChecksumMode != minio.ChecksumFullObjectMode.String() { + logError(testName, function, args, startTime, "", "Checksum mode is not full object", fmt.Errorf("got %s, want %s", resp.ChecksumMode, minio.ChecksumFullObjectMode.String())) + } + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(st.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) + if st.ChecksumMode != minio.ChecksumFullObjectMode.String() { + logError(testName, function, args, startTime, "", "Checksum mode is not full object", fmt.Errorf("got %s, want %s", st.ChecksumMode, minio.ChecksumFullObjectMode.String())) + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + args["range"] = "true" + err = gopts.SetRange(100, 1000) + if err != nil { + logError(testName, function, args, startTime, "", "SetRange failed", err) + return + } + r, err = c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + b, err = io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + st, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Range requests should return empty checksums... + cmpChecksum(st.ChecksumSHA256, "") + cmpChecksum(st.ChecksumSHA1, "") + cmpChecksum(st.ChecksumCRC32, "") + cmpChecksum(st.ChecksumCRC32C, "") + cmpChecksum(st.ChecksumCRC64NVME, "") + + delete(args, "range") + delete(args, "metadata") + logSuccess(testName, function, args, startTime) + } +} + +// Test PutObject with custom checksums. +func testPutObjectWithTrailingChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress, TrailChecksum: xxx}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + cs minio.ChecksumType + }{ + {cs: minio.ChecksumCRC64NVME}, + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, + } + for _, test := range tests { + if os.Getenv("MINT_NO_FULL_OBJECT") != "" && test.cs.FullObjectRequested() { + continue + } + function := "PutObject(bucketName, objectName, reader,size, opts)" + bufSize := dataFileMap["datafile-10-kB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-10-kB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + h := test.cs.Hasher() + h.Reset() + + // Test with Wrong CRC. + args["metadata"] = meta + args["range"] = "false" + args["checksum"] = test.cs.String() + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + DisableContentSha256: true, + UserMetadata: meta, + Checksum: test.cs, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + h.Write(b) + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(resp.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + + function = "GetObject(...)" + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(resp.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + function = "GetObject( Range...)" + args["range"] = "true" + err = gopts.SetRange(100, 1000) + if err != nil { + logError(testName, function, args, startTime, "", "SetRange failed", err) + return + } + r, err = c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + b, err = io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + st, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Range requests should return empty checksums... + cmpChecksum(st.ChecksumSHA256, "") + cmpChecksum(st.ChecksumSHA1, "") + cmpChecksum(st.ChecksumCRC32, "") + cmpChecksum(st.ChecksumCRC32C, "") + cmpChecksum(st.ChecksumCRC64NVME, "") + + function = "GetObjectAttributes(...)" + s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + cmpChecksum(s.Checksum.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(s.Checksum.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(s.Checksum.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(s.Checksum.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + delete(args, "range") + delete(args, "metadata") + logSuccess(testName, function, args, startTime) + } +} + +// Test PutObject with custom checksums. +func testPutMultipartObjectWithChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Trailing: true}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + hashMultiPart := func(b []byte, partSize int, cs minio.ChecksumType) string { + r := bytes.NewReader(b) + hasher := cs.Hasher() + if cs.FullObjectRequested() { + partSize = len(b) + } + tmp := make([]byte, partSize) + parts := 0 + var all []byte + for { + n, err := io.ReadFull(r, tmp) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + logError(testName, function, args, startTime, "", "Calc crc failed", err) + } + if n == 0 { + break + } + parts++ + hasher.Reset() + hasher.Write(tmp[:n]) + all = append(all, hasher.Sum(nil)...) + if err != nil { + break + } + } + if parts == 1 { + return base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + } + hasher.Reset() + hasher.Write(all) + return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) + } + defer cleanupBucket(bucketName, c) + tests := []struct { + cs minio.ChecksumType + }{ + {cs: minio.ChecksumFullObjectCRC32}, + {cs: minio.ChecksumFullObjectCRC32C}, + {cs: minio.ChecksumCRC64NVME}, + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, + } + + for _, test := range tests { + if os.Getenv("MINT_NO_FULL_OBJECT") != "" && test.cs.FullObjectRequested() { + continue + } + + args["section"] = "prep" + bufSize := dataFileMap["datafile-129-MB"] + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + args["checksum"] = test.cs.String() + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + // fmt.Printf("want %s, got %s\n", want, got) + return + } + } + + const partSize = 10 << 20 + reader := getDataReader("datafile-129-MB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + reader.Close() + h := test.cs.Hasher() + h.Reset() + // wantChksm might be the full object checksum or the multipart checksum, depending on the test.cs type. + wantChksm := hashMultiPart(b, partSize, test.cs) + // wantFullObjectChksm is always the full object checksum that is returned after CopyObject. + wantFullObjectChksm := hashMultiPart(b, len(b), test.cs) + + rd := bytes.NewReader(b) + cs := test.cs + + // Set correct CRC. + args["section"] = "PutObject" + resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{ + DisableContentSha256: true, + DisableMultipart: false, + UserMetadata: nil, + PartSize: partSize, + Checksum: cs, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + switch test.cs.Base() { + case minio.ChecksumCRC32C: + cmpChecksum(resp.ChecksumCRC32C, wantChksm) + case minio.ChecksumCRC32: + cmpChecksum(resp.ChecksumCRC32, wantChksm) + case minio.ChecksumSHA1: + cmpChecksum(resp.ChecksumSHA1, wantChksm) + case minio.ChecksumSHA256: + cmpChecksum(resp.ChecksumSHA256, wantChksm) + case minio.ChecksumCRC64NVME: + cmpChecksum(resp.ChecksumCRC64NVME, wantChksm) + } + + args["section"] = "HeadObject" + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + switch test.cs.Base() { + case minio.ChecksumCRC32C: + cmpChecksum(st.ChecksumCRC32C, wantChksm) + case minio.ChecksumCRC32: + cmpChecksum(st.ChecksumCRC32, wantChksm) + case minio.ChecksumSHA1: + cmpChecksum(st.ChecksumSHA1, wantChksm) + case minio.ChecksumSHA256: + cmpChecksum(st.ChecksumSHA256, wantChksm) + case minio.ChecksumCRC64NVME: + cmpChecksum(st.ChecksumCRC64NVME, wantChksm) + } + + // Use the CopyObject API to make a copy, in the case it was a composite checksum, + // it will change because the copy is no longer a multipart object. S3 returns the checksum + // of the full object when HeadObject is called on the copy. + args["section"] = "CopyObject" + objectCopyName := objectName + "-copy" + _, err = c.CopyObject(context.Background(), minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectCopyName, + }, minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + }) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + args["section"] = "HeadObject-Copy" + st, err = c.StatObject(context.Background(), bucketName, objectCopyName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + switch test.cs.Base() { + case minio.ChecksumCRC32C: + cmpChecksum(st.ChecksumCRC32C, wantFullObjectChksm) + case minio.ChecksumCRC32: + cmpChecksum(st.ChecksumCRC32, wantFullObjectChksm) + case minio.ChecksumSHA1: + cmpChecksum(st.ChecksumSHA1, wantFullObjectChksm) + case minio.ChecksumSHA256: + cmpChecksum(st.ChecksumSHA256, wantFullObjectChksm) + case minio.ChecksumCRC64NVME: + cmpChecksum(st.ChecksumCRC64NVME, wantFullObjectChksm) + } + + args["section"] = "GetObjectAttributes" + s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + + if strings.ContainsRune(wantChksm, '-') { + wantChksm = wantChksm[:strings.IndexByte(wantChksm, '-')] + } + switch test.cs { + // Full Object CRC does not return anything with GetObjectAttributes + case minio.ChecksumCRC32C: + cmpChecksum(s.Checksum.ChecksumCRC32C, wantChksm) + case minio.ChecksumCRC32: + cmpChecksum(s.Checksum.ChecksumCRC32, wantChksm) + case minio.ChecksumSHA1: + cmpChecksum(s.Checksum.ChecksumSHA1, wantChksm) + case minio.ChecksumSHA256: + cmpChecksum(s.Checksum.ChecksumSHA256, wantChksm) + } + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + gopts.PartNumber = 2 + + // We cannot use StatObject, since it ignores partnumber. + args["section"] = "GetObject-Part" + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + io.Copy(io.Discard, r) + st, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Test part 2 checksum... + h.Reset() + h.Write(b[partSize : 2*partSize]) + wantChksm = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + switch test.cs { + // Full Object CRC does not return any part CRC for whatever reason. + case minio.ChecksumCRC32C: + cmpChecksum(st.ChecksumCRC32C, wantChksm) + case minio.ChecksumCRC32: + cmpChecksum(st.ChecksumCRC32, wantChksm) + case minio.ChecksumSHA1: + cmpChecksum(st.ChecksumSHA1, wantChksm) + case minio.ChecksumSHA256: + cmpChecksum(st.ChecksumSHA256, wantChksm) + case minio.ChecksumCRC64NVME: + // AWS doesn't return part checksum, but may in the future. + if st.ChecksumCRC64NVME != "" { + cmpChecksum(st.ChecksumCRC64NVME, wantChksm) + } + } + + delete(args, "metadata") + delete(args, "section") + logSuccess(testName, function, args, startTime) + } +} + +// Test PutObject with trailing checksums. +func testTrailingChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) (oparts []minio.ObjectPart) { + r := bytes.NewReader(b) + tmp := make([]byte, partSize) + parts := 0 + for { + n, err := io.ReadFull(r, tmp) + if err != nil && err != io.ErrUnexpectedEOF { + logError(testName, function, args, startTime, "", "Calc crc failed", err) + } + if n == 0 { + break + } + parts++ + hasher.Reset() + hasher.Write(tmp[:n]) + oparts = append(oparts, minio.ObjectPart{ + PartNumber: parts, + Size: int64(n), + ChecksumCRC32C: base64.StdEncoding.EncodeToString(hasher.Sum(nil)), + }) + if err != nil { + break + } + } + return oparts + } + defer cleanupBucket(bucketName, c) + tests := []struct { + header string + hasher hash.Hash + + // Checksum values + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + PO minio.PutObjectOptions + }{ + // Currently there is no way to override the checksum type. + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: true, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 5 << 20, + Checksum: minio.ChecksumFullObjectCRC32C, + }, + }, + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: true, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 6_645_654, // Rather arbitrary size + Checksum: minio.ChecksumFullObjectCRC32C, + }, + }, + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: false, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 5 << 20, + Checksum: minio.ChecksumFullObjectCRC32C, + }, + }, + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: false, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 6_645_654, // Rather arbitrary size + Checksum: minio.ChecksumFullObjectCRC32C, + }, + }, + } + + for _, test := range tests { + bufSize := dataFileMap["datafile-11-MB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got)) + return + } + } + + reader := getDataReader("datafile-11-MB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + reader.Close() + h := test.hasher + h.Reset() + + parts := hashMultiPart(b, int(test.PO.PartSize), test.hasher) + cksum, err := minio.ChecksumFullObjectCRC32C.FullObjectChecksum(parts) + if err != nil { + logError(testName, function, args, startTime, "", "checksum calculation failed", err) + return + } + test.ChecksumCRC32C = cksum.Encoded() + + // Set correct CRC. + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // c.TraceOff() + cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) + cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) + cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) + cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + gopts.PartNumber = 2 + + // We cannot use StatObject, since it ignores partnumber. + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + io.Copy(io.Discard, r) + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Test part 2 checksum... + h.Reset() + p2 := b[test.PO.PartSize:] + if len(p2) > int(test.PO.PartSize) { + p2 = p2[:test.PO.PartSize] + } + h.Write(p2) + got := base64.StdEncoding.EncodeToString(h.Sum(nil)) + if test.ChecksumSHA256 != "" { + cmpChecksum(st.ChecksumSHA256, got) + } + if test.ChecksumSHA1 != "" { + cmpChecksum(st.ChecksumSHA1, got) + } + if test.ChecksumCRC32 != "" { + cmpChecksum(st.ChecksumCRC32, got) + } + if test.ChecksumCRC32C != "" { + cmpChecksum(st.ChecksumCRC32C, got) + } + + delete(args, "metadata") + logSuccess(testName, function, args, startTime) + } +} + +// Test PutObject with custom checksums. +func testPutObjectWithAutomaticChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + header string + hasher hash.Hash + + // Checksum values + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + }{ + // Built-in will only add crc32c, when no MD5 nor SHA256. + {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, + } + + // defer c.TraceOff() + + for i, test := range tests { + bufSize := dataFileMap["datafile-10-kB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-10-kB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + + h := test.hasher + h.Reset() + h.Write(b) + meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + args["metadata"] = meta + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: nil, + DisableContentSha256: true, + SendContentMd5: false, + }) + if err == nil { + if i == 0 && resp.ChecksumCRC32C == "" { + logIgnored(testName, function, args, startTime, "Checksums does not appear to be supported by backend") + return + } + } else { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + // Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent. + // When/if we add a checksum control to PutObjectOptions this will make more sense. + resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: nil, + DisableContentSha256: false, + SendContentMd5: false, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // The checksum will not be enabled on HTTP, since it uses SHA256 blocks. + if mustParseBool(os.Getenv(enableHTTPS)) { + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + } + + // Set SHA256 header manually + sh256 := sha256.Sum256(b) + meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])} + args["metadata"] = meta + resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: meta, + DisableContentSha256: true, + SendContentMd5: false, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + delete(args, "metadata") + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectAttributes() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-") + args["bucketName"] = bucketNameV + err = c.MakeBucket( + context.Background(), + bucketNameV, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + err = c.EnableVersioning(context.Background(), bucketNameV) + if err != nil { + logError(testName, function, args, startTime, "", "Unable to enable versioning", err) + return + } + + defer cleanupBucket(bucketName, c) + defer cleanupVersionedBucket(bucketNameV, c) + + testFiles := make(map[string]*objectAttributesNewObject) + testFiles["file1"] = &objectAttributesNewObject{ + Object: "file1", + ObjectReaderType: "datafile-1.03-MB", + Bucket: bucketNameV, + ContentType: "custom/contenttype", + SendContentMd5: false, + } + + testFiles["file2"] = &objectAttributesNewObject{ + Object: "file2", + ObjectReaderType: "datafile-129-MB", + Bucket: bucketName, + ContentType: "custom/contenttype", + SendContentMd5: false, + } + + for i, v := range testFiles { + bufSize := dataFileMap[v.ObjectReaderType] + + reader := getDataReader(v.ObjectReaderType) + + args["objectName"] = v.Object + testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: v.ContentType, + SendContentMd5: v.SendContentMd5, + Checksum: minio.ChecksumCRC32C, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + testTable := make(map[string]objectAttributesTableTest) + + testTable["none-versioned"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{}, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-to-0-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 0, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-marker-to-max"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 10000, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-to-1-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 1, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["7-to-6-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 7, + MaxParts: 6, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["versioned"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{}, + test: objectAttributesTestOptions{ + TestFileName: "file1", + StorageClass: "STANDARD", + HasFullChecksum: true, + }, + } + + for i, v := range testTable { + + tf, ok := testFiles[v.test.TestFileName] + if !ok { + continue + } + + args["objectName"] = tf.Object + args["bucketName"] = tf.Bucket + if tf.UploadInfo.VersionID != "" { + v.opts.VersionID = tf.UploadInfo.VersionID + } + + s, err := c.GetObjectAttributes(context.Background(), tf.Bucket, tf.Object, v.opts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + + v.test.NumberOfParts = s.ObjectParts.PartsCount + v.test.ETag = tf.UploadInfo.ETag + v.test.ObjectSize = int(tf.UploadInfo.Size) + + err = validateObjectAttributeRequest(s, &v.opts, &v.test) + if err != nil { + logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed, table test: "+i, err) + return + } + + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectAttributesSSECEncryption() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := "encrypted-object" + args["objectName"] = objectName + bufSize := dataFileMap["datafile-11-MB"] + reader := getDataReader("datafile-11-MB") + + sse := encrypt.DefaultPBKDF([]byte("word1 word2 word3 word4"), []byte(bucketName+objectName)) + + info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: "content/custom", + SendContentMd5: false, + ServerSideEncryption: sse, + PartSize: uint64(bufSize) / 2, + Checksum: minio.ChecksumCRC32C, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + opts := minio.ObjectAttributesOptions{ + ServerSideEncryption: sse, + } + attr, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + err = validateObjectAttributeRequest(attr, &opts, &objectAttributesTestOptions{ + TestFileName: info.Key, + ETag: info.ETag, + NumberOfParts: 2, + ObjectSize: int(info.Size), + HasFullChecksum: true, + HasParts: true, + HasPartChecksums: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectAttributesErrorCases() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-") + unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-") + + _, err = c.GetObjectAttributes(context.Background(), unknownBucket, unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", nil) + return + } + + errorResponse := err.(minio.ErrorResponse) + if errorResponse.Code != minio.NoSuchBucket { + logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-") + args["bucketName"] = bucketNameV + err = c.MakeBucket( + context.Background(), + bucketNameV, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + err = c.EnableVersioning(context.Background(), bucketNameV) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + defer cleanupVersionedBucket(bucketNameV, c) + + _, err = c.GetObjectAttributes(context.Background(), bucketName, unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", nil) + return + } + + errorResponse = err.(minio.ErrorResponse) + if errorResponse.Code != minio.NoSuchKey { + logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchKey+" but got "+errorResponse.Code, nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), bucketName, "", minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty object name should have failed", nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), "", unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), bucketNameV, unknownObject, minio.ObjectAttributesOptions{ + VersionID: uuid.NewString(), + }) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + errorResponse = err.(minio.ErrorResponse) + if errorResponse.Code != minio.NoSuchVersion { + logError(testName, function, args, startTime, "", "Invalid error code, expected "+minio.NoSuchVersion+" but got "+errorResponse.Code, nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +type objectAttributesNewObject struct { + Object string + ObjectReaderType string + Bucket string + ContentType string + SendContentMd5 bool + UploadInfo minio.UploadInfo +} + +type objectAttributesTableTest struct { + opts minio.ObjectAttributesOptions + test objectAttributesTestOptions +} + +type objectAttributesTestOptions struct { + TestFileName string + ETag string + NumberOfParts int + StorageClass string + ObjectSize int + HasPartChecksums bool + HasFullChecksum bool + HasParts bool +} + +func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.ObjectAttributesOptions, test *objectAttributesTestOptions) (err error) { + if opts.VersionID != "" { + if OA.VersionID != opts.VersionID { + err = fmt.Errorf("Expected versionId %s but got versionId %s", opts.VersionID, OA.VersionID) + return err + } + } + + partsMissingChecksum := false + foundPartChecksum := false + for _, v := range OA.ObjectParts.Parts { + checksumFound := false + if v.ChecksumSHA256 != "" { + checksumFound = true + } else if v.ChecksumSHA1 != "" { + checksumFound = true + } else if v.ChecksumCRC32 != "" { + checksumFound = true + } else if v.ChecksumCRC32C != "" { + checksumFound = true + } + if !checksumFound { + partsMissingChecksum = true + } else { + foundPartChecksum = true + } + } + + if test.HasPartChecksums { + if partsMissingChecksum { + err = fmt.Errorf("One or all parts were missing a checksum") + return err + } + } else { + if foundPartChecksum { + err = fmt.Errorf("Did not expect ObjectParts to have checksums but found one") + return err + } + } + + hasFullObjectChecksum := (OA.Checksum.ChecksumCRC32 != "" || + OA.Checksum.ChecksumCRC32C != "" || + OA.Checksum.ChecksumSHA1 != "" || + OA.Checksum.ChecksumSHA256 != "") + + if test.HasFullChecksum { + if !hasFullObjectChecksum { + err = fmt.Errorf("Full object checksum not found") + return err + } + } else { + if hasFullObjectChecksum { + err = fmt.Errorf("Did not expect a full object checksum but we got one") + return err + } + } + + if OA.ETag != test.ETag { + err = fmt.Errorf("Etags do not match, got %s but expected %s", OA.ETag, test.ETag) + return err + } + + if test.HasParts { + if len(OA.ObjectParts.Parts) < 1 { + err = fmt.Errorf("Was expecting ObjectParts but none were present") + return err + } + } + + if OA.StorageClass == "" { + err = fmt.Errorf("Was expecting a StorageClass but got none") + return err + } + + if OA.ObjectSize != test.ObjectSize { + err = fmt.Errorf("Was expecting a ObjectSize but got none") + return err + } + + if test.HasParts { + if opts.MaxParts == 0 { + if len(OA.ObjectParts.Parts) != OA.ObjectParts.PartsCount { + err = fmt.Errorf("expected %s parts but got %d", OA.ObjectParts.PartsCount, len(OA.ObjectParts.Parts)) + return err + } + } else if (opts.MaxParts + opts.PartNumberMarker) > OA.ObjectParts.PartsCount { + if len(OA.ObjectParts.Parts) != (OA.ObjectParts.PartsCount - opts.PartNumberMarker) { + err = fmt.Errorf("expected %d parts but got %d", (OA.ObjectParts.PartsCount - opts.PartNumberMarker), len(OA.ObjectParts.Parts)) + return err + } + } else if opts.MaxParts != 0 { + if opts.MaxParts != len(OA.ObjectParts.Parts) { + err = fmt.Errorf("expected %d parts but got %d", opts.MaxParts, len(OA.ObjectParts.Parts)) + return err + } + } + } + + if OA.ObjectParts.NextPartNumberMarker == OA.ObjectParts.PartsCount { + if OA.ObjectParts.IsTruncated { + err = fmt.Errorf("Expected ObjectParts to NOT be truncated, but it was") + return err + } + } + + if OA.ObjectParts.NextPartNumberMarker != OA.ObjectParts.PartsCount { + if !OA.ObjectParts.IsTruncated { + err = fmt.Errorf("Expected ObjectParts to be truncated, but it was NOT") + return err + } + } + + return err +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectWithMetadata() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object custom metadata + customContentType := "custom/contenttype" + + args["metadata"] = map[string][]string{ + "Content-Type": {customContentType}, + "X-Amz-Meta-CustomKey": {"extra spaces in value"}, + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: customContentType, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testPutObjectWithContentLanguage() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + data := []byte{} + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. + sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := newRandomReader(size, size) + ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != size { + logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if objInfo.Size != size { + logError(testName, function, args, startTime, "", "Unexpected size", err) + return + } + + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with preconditions on non-existent objects +func testPutObjectPreconditionOnNonExistent() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts) with preconditions" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{SetMatchETag/SetMatchETagExcept}", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Test 1: PutObject with SetMatchETag on non-existent object should fail + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "test-object-") + args["objectName"] = objectName + + data := bytes.NewReader([]byte("test data")) + + opts := minio.PutObjectOptions{} + opts.SetMatchETag("some-etag") + + _, err = c.PutObject(context.Background(), bucketName, objectName, data, int64(data.Len()), opts) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject with SetMatchETag on non-existent object should have failed", nil) + return + } + + errResp := minio.ToErrorResponse(err) + if errResp.Code != "NoSuchKey" { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected NoSuchKey error (AWS standard for non-existent objects), got %s", errResp.Code), err) + return + } + + // Test 2: PutObject with SetMatchETagExcept (If-None-Match) on non-existent object should succeed + objectName2 := randString(60, rand.NewSource(time.Now().UnixNano()), "test-object2-") + args["objectName"] = objectName2 + + data2 := bytes.NewReader([]byte("test data 2")) + opts2 := minio.PutObjectOptions{} + opts2.SetMatchETagExcept("some-etag") + + _, err = c.PutObject(context.Background(), bucketName, objectName2, data2, int64(data2.Len()), opts2) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with SetMatchETagExcept (If-None-Match) on non-existent object should have succeeded", err) + return + } + // Test 3: CompleteMultipartUpload with preconditions on non-existent object should fail + objectName3 := randString(60, rand.NewSource(time.Now().UnixNano()), "test-multipart-") + args["objectName"] = objectName3 + + data3 := bytes.Repeat([]byte("a"), 5*1024*1024+1) + reader3 := bytes.NewReader(data3) + + opts3 := minio.PutObjectOptions{} + opts3.SetMatchETag("non-existent-etag") + + _, err = c.PutObject(context.Background(), bucketName, objectName3, reader3, int64(len(data3)), opts3) + if err == nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload with SetMatchETag on non-existent object should have failed", nil) + return + } + + errResp = minio.ToErrorResponse(err) + if errResp.Code != "NoSuchKey" { + logError(testName, function, args, startTime, "", fmt.Sprintf("Expected NoSuchKey error (AWS standard for non-existent objects) for multipart, got %s", errResp.Code), err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object seeker from the end, using whence set to '2'. +func testGetObjectSeekEnd() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return + } + + pos, err := r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", err) + return + } + buf2 := make([]byte, 100) + m, err := readFull(r, buf2) + if err != nil { + logError(testName, function, args, startTime, "", "Error reading through readFull", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) + return + } + hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) + hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) + if hexBuf1 != hexBuf2 { + logError(testName, function, args, startTime, "", "Values at same index dont match", err) + return + } + pos, err = r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", err) + return + } + if err = r.Close(); err != nil { + logError(testName, function, args, startTime, "", "ObjectClose failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwice() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return + } + if err := crcMatchesName(r, "datafile-33-kB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Already closed object. No error returned", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test RemoveObjects request where context cancels after timeout +func testRemoveObjectsContext() { + // Initialize logging params. + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(ctx, bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Instantiate new minio client. + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate put data. + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 20 objects. + nrObjects := 20 + objectsCh := make(chan minio.ObjectInfo) + go func() { + defer close(objectsCh) + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + // Set context to cancel in 1 nanosecond. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Call RemoveObjects API with short timeout. + errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + // Check for error. + select { + case r := <-errorCh: + if r.Err == nil { + logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) + return + } + } + // Set context with longer timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. + errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + select { + case r, more := <-errorCh: + if more || r.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Test removing multiple objects with Remove API +func testRemoveMultipleObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + r := bytes.NewReader(bytes.Repeat([]byte("a"), 1)) + + // Multi remove of 1100 objects + nrObjects := 1100 + + objectsCh := make(chan minio.ObjectInfo) + + go func() { + defer close(objectsCh) + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 1, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + + // Call RemoveObjects API + errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) + + // Check if errorCh doesn't receive any error + select { + case r, more := <-errorCh: + if more { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Test removing multiple objects with Remove API as iterator +func testRemoveMultipleObjectsIter() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + buf := []byte("a") + + // Multi remove of 1100 objects + nrObjects := 1100 + + objectsIter := func() iter.Seq[minio.ObjectInfo] { + return func(yield func(minio.ObjectInfo) bool) { + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + if !yield(minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + }) { + return + } + } + } + } + + // Call RemoveObjects API + results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter(), minio.RemoveObjectsOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", err) + return + } + + for result := range results { + if result.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", result.Err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Test removing multiple objects and check for results +func testRemoveMultipleObjectsWithResult() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupVersionedBucket(bucketName, c) + + buf := []byte("a") + + nrObjects := 10 + nrLockedObjects := 5 + + objectsCh := make(chan minio.ObjectInfo) + + go func() { + defer close(objectsCh) + // Upload objects and send them to objectsCh + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), 1, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if i < nrLockedObjects { + // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC) + t := time.Now().Add(5 * time.Minute) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: false, + RetainUntilDate: &t, + Mode: &m, + VersionID: info.VersionID, + } + err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "Error setting retention", err) + return + } + } + + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + + // Call RemoveObjects API + resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) + + var foundNil, foundErr int + + for { + // Check if errorCh doesn't receive any error + select { + case deleteRes, ok := <-resultCh: + if !ok { + goto out + } + if deleteRes.ObjectName == "" { + logError(testName, function, args, startTime, "", "Unexpected object name", nil) + return + } + if deleteRes.ObjectVersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected object version ID", nil) + return + } + + if deleteRes.Err == nil { + foundNil++ + } else { + foundErr++ + } + } + } +out: + if foundNil+foundErr != nrObjects { + logError(testName, function, args, startTime, "", "Unexpected number of results", nil) + return + } + + if foundNil != nrObjects-nrLockedObjects { + logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil) + return + } + + if foundErr != nrLockedObjects { + logError(testName, function, args, startTime, "", "Unexpected number of errors", nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests FPutObject of a big file to trigger multipart +func testFPutObjectMultipart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. + fileName := getMintDataDirFilePath("datafile-129-MB") + if fileName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + fileName = file.Name() + args["fileName"] = fileName + } + totalSize := dataFileMap["datafile-129-MB"] + // Set base object name + objectName := bucketName + "FPutObject" + "-standard" + args["objectName"] = objectName + + objectContentType := "testapplication/octet-stream" + args["objectContentType"] = objectContentType + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", err) + return + } + if objInfo.Size != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) + return + } + if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType doesn't match", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests FPutObject with null contentType (default = application/octet-stream) +func testFPutObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + location := "us-east-1" + + // Make a new bucket. + args["bucketName"] = bucketName + args["location"] = location + function = "MakeBucket(bucketName, location)" + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-129-MB") + if fName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + function = "FPutObject(bucketName, objectName, fileName, opts)" + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + "-standard" + args["fileName"] = fName + args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + if ui.Size != int64(dataFileMap["datafile-129-MB"]) { + logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + args["objectName"] = objectName + "-Octet" + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + + srcFile, err := os.Open(fName) + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + defer srcFile.Close() + // Add extension to temp file name + tmpFile, err := os.Create(fName + ".gtar") + if err != nil { + logError(testName, function, args, startTime, "", "File create failed", err) + return + } + _, err = io.Copy(tmpFile, srcFile) + if err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + tmpFile.Close() + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + args["objectName"] = objectName + "-GTar" + args["opts"] = minio.PutObjectOptions{} + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Check headers + function = "StatObject(bucketName, objectName, opts)" + args["objectName"] = objectName + "-standard" + rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rStandard.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) + return + } + + function = "StatObject(bucketName, objectName, opts)" + args["objectName"] = objectName + "-Octet" + rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rOctet.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err) + return + } + + function = "StatObject(bucketName, objectName, opts)" + args["objectName"] = objectName + "-GTar" + rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-tar or application/octet-stream, got "+rGTar.ContentType, err) + return + } + + os.Remove(fName + ".gtar") + logSuccess(testName, function, args, startTime) +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests FPutObject request when context cancels after timeout +func testFPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + fName := getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest") + if err != nil { + logError(testName, function, args, startTime, "", "Temp file creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + + // Set base object name + objectName := bucketName + "FPutObjectContext" + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Perform FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + // Perform FPutObject with a long timeout. Expect the put object to succeed + _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) + return + } + + _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test validates putObject with context to see if request cancellation is honored. +func testPutObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "opts": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket call failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + cancel() + args["ctx"] = ctx + args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests get object with s3zip extensions. +func testGetObjectS3Zip() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{"x-minio-extract": true} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip" + args["objectName"] = objectName + + var zipFile bytes.Buffer + zw := zip.NewWriter(&zipFile) + rng := rand.New(rand.NewSource(0xc0cac01a)) + const nFiles = 500 + for i := 0; i <= nFiles; i++ { + if i == nFiles { + // Make one large, compressible file. + i = 1000000 + } + b := make([]byte, i) + if i < nFiles { + rng.Read(b) + } + wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i)) + if err != nil { + logError(testName, function, args, startTime, "", "zw.Create failed", err) + return + } + wc.Write(b) + } + err = zw.Close() + if err != nil { + logError(testName, function, args, startTime, "", "zw.Close failed", err) + return + } + buf := zipFile.Bytes() + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(len(buf))+", got "+string(st.Size), err) + return + } + r.Close() + + zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf))) + if err != nil { + logError(testName, function, args, startTime, "", "zip.NewReader failed", err) + return + } + lOpts := minio.ListObjectsOptions{} + lOpts.Set("x-minio-extract", "true") + lOpts.Prefix = objectName + "/" + lOpts.Recursive = true + list := c.ListObjects(context.Background(), bucketName, lOpts) + listed := map[string]minio.ObjectInfo{} + for item := range list { + if item.Err != nil { + break + } + listed[item.Key] = item + } + if len(listed) == 0 { + // Assume we are running against non-minio. + args["SKIPPED"] = true + logIgnored(testName, function, args, startTime, "s3zip does not appear to be present") + return + } + + for _, file := range zr.File { + if file.FileInfo().IsDir() { + continue + } + args["zipfile"] = file.Name + zfr, err := file.Open() + if err != nil { + logError(testName, function, args, startTime, "", "file.Open failed", err) + return + } + want, err := io.ReadAll(zfr) + if err != nil { + logError(testName, function, args, startTime, "", "fzip file read failed", err) + return + } + + opts := minio.GetObjectOptions{} + opts.Set("x-minio-extract", "true") + key := path.Join(objectName, file.Name) + r, err = c.GetObject(context.Background(), bucketName, key, opts) + if err != nil { + terr := minio.ToErrorResponse(err) + if terr.StatusCode != http.StatusNotFound { + logError(testName, function, args, startTime, "", "GetObject failed", err) + } + return + } + got, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + r.Close() + if !bytes.Equal(want, got) { + logError(testName, function, args, startTime, "", "Content mismatch", err) + return + } + oi, ok := listed[key] + if !ok { + logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key)) + return + } + if int(oi.Size) != len(got) { + logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got))) + return + } + delete(listed, key) + } + delete(args, "zipfile") + if len(listed) > 0 { + logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) + return + } + logSuccess(testName, function, args, startTime) +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + // Generic seek error for errors other than io.EOF + seekErr := errors.New("seek error") + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, seekErr, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, seekErr, false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + // We expect an error + if testCase.err == seekErr && err == nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) + return + } + // We expect a specific error + if testCase.err != seekErr && testCase.err != err { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) + return + } + // If we expect an error go to the next loop + if testCase.err != nil { + continue + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + logSuccess(testName, function, args, startTime) +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Reproduces issue https://github.com/minio/minio-go/issues/1137 +func testGetObjectReadAtWhenEOFWasReached() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read directly + buf1 := make([]byte, len(buf)) + buf2 := make([]byte, 512) + + m, err := r.Read(buf1) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "Read read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf) { + logError(testName, function, args, startTime, "", "Incorrect count of Read data", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, 512) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[512:1024]) { + logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test Presigned Post Policy +func testPresignedPostPolicy() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + policy := minio.NewPostPolicy() + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + policy.SetContentEncoding("gzip") + + // Add CRC32C + checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, "", "SetChecksum failed", err) + return + } + + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + writer.WriteField(k, v) + } + + // Get a 33KB file to upload and test if set post policy works + filePath := getMintDataDirFilePath("datafile-33-kB") + if filePath == "" { + // Make a temp file with 33 KB data. + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + + // add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + args["url"] = presignedPostPolicyURL.String() + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // make post request with correct form data + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + // expected path should be absolute path of the object + var scheme string + if mustParseBool(os.Getenv(enableHTTPS)) { + scheme = "https://" + } else { + scheme = "http://" + } + + expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName + + if !strings.Contains(expectedLocation, ".amazonaws.com/") { + // Test when not against AWS S3. + if val, ok := res.Header["Location"]; ok { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { + logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err) + return + } + } else { + logError(testName, function, args, startTime, "", "Location not found in header response", err) + return + } + } + wantChecksumCrc32c := checksum.Encoded() + if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != wantChecksumCrc32c { + logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", wantChecksumCrc32c, got), nil) + return + } + + // Ensure that when we subsequently GetObject, the checksum is returned + gopts := minio.GetObjectOptions{Checksum: true} + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.ChecksumCRC32C != wantChecksumCrc32c { + logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %s, got %s", wantChecksumCrc32c, st.ChecksumCRC32C), nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// testPresignedPostPolicyWrongFile tests that when we have a policy with a checksum, we cannot POST the wrong file +func testPresignedPostPolicyWrongFile() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + policy := minio.NewPostPolicy() + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + + // Add CRC32C of some data that the policy will explicitly allow. + checksum := minio.ChecksumCRC32C.ChecksumBytes([]byte{0x01, 0x02, 0x03}) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, "", "SetChecksum failed", err) + return + } + + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + // At this stage, we have a policy that allows us to upload for a specific checksum. + // Test that uploading datafile-10-kB, with a different checksum, fails as expected + filePath := getMintDataDirFilePath("datafile-10-kB") + if filePath == "" { + // Make a temp file with 10 KB data. + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-10-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + fileReader := getDataReader("datafile-10-kB") + defer fileReader.Close() + buf10k, err := io.ReadAll(fileReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + otherChecksum := minio.ChecksumCRC32C.ChecksumBytes(buf10k) + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + if k == "x-amz-checksum-crc32c" { + v = otherChecksum.Encoded() + } + writer.WriteField(k, v) + } + + // Add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + httpClient := &http.Client{ + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + args["url"] = presignedPostPolicyURL.String() + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // Make the POST request with the form data. + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusForbidden { + logError(testName, function, args, startTime, "", "HTTP request unexpected status", errors.New(res.Status)) + return + } + + // Read the response body, ensure it has checksum failure message + resBody, err := io.ReadAll(res.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Normalize the response body, because S3 uses quotes around the policy condition components + // in the error message, MinIO does not. + resBodyStr := strings.ReplaceAll(string(resBody), `"`, "") + if !strings.Contains(resBodyStr, "Policy Condition failed: [eq, $x-amz-checksum-crc32c, 8TDyHg=") { + logError(testName, function, args, startTime, "", "Unexpected response body", errors.New(resBodyStr)) + return + } + + logSuccess(testName, function, args, startTime) +} + +// testPresignedPostPolicyEmptyFileName tests that an empty file name in the presigned post policy +func testPresignedPostPolicyEmptyFileName() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + policy := minio.NewPostPolicy() + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + policy.SetContentEncoding("gzip") + + // Add CRC32C + checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, "", "SetChecksum failed", err) + return + } + + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + writer.WriteField(k, v) + } + + // Get a 33KB file to upload and test if set post policy works + filePath := getMintDataDirFilePath("datafile-33-kB") + if filePath == "" { + // Make a temp file with 33 KB data. + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + + // add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + args["url"] = presignedPostPolicyURL.String() + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // make post request with correct form data + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusBadRequest { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + body, err := io.ReadAll(res.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !strings.Contains(string(body), "MalformedPOSTRequest") { + logError(testName, function, args, startTime, "", "Invalid error from server", errors.New(string(body))) + } + + logSuccess(testName, function, args, startTime) +} + +// Tests copy object +func testCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(dst, src)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + // Set copy conditions. + MatchETag: objInfo.ETag, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + } + args["src"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName + "-copy", + Object: objectName + "-copy", + } + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Source object + r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + // Check the various fields of source object against destination object. + objInfo, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + objInfoCopy, err := readerCopy.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if objInfo.Size != objInfoCopy.Size { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err) + return + } + + if err := crcMatchesName(r, "datafile-33-kB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil { + logError(testName, function, args, startTime, "", "copy data CRC check failed", err) + return + } + // Close all the get readers before proceeding with CopyObject operations. + r.Close() + readerCopy.Close() + + // CopyObject again but with wrong conditions + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, + } + + // Perform the Copy which should fail + _, err = c.CopyObject(context.Background(), dst, src) + if err == nil { + logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return + } + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + ReplaceMetadata: true, + UserMetadata: map[string]string{ + "Copy": "should be same", + }, + } + args["dst"] = dst + args["src"] = src + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) + return + } + + oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + stOpts := minio.StatObjectOptions{} + stOpts.SetMatchETag(oi.ETag) + objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) + return + } + + if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { + logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-C get object ReaderSeeker interface methods. +func testSSECEncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-S3 get object ReaderSeeker interface methods. +func testSSES3EncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-C get object ReaderAt interface methods. +func testSSECEncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-S3 get object ReaderAt interface methods. +func testSSES3EncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + defer r.Close() + + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. + m, err := r.ReadAt(buf1, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf1) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return + } + if !bytes.Equal(buf1, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + m, err = r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// testSSECEncryptionPutGet tests encryption with customer provided encryption keys +func testSSECEncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + logSuccess(testName, function, args, startTime) + + } + + logSuccess(testName, function, args, startTime) +} + +// TestEncryptionFPut tests encryption with customer specified encryption keys +func testSSECEncryptionFPut() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + os.Remove(fileName) + } + + logSuccess(testName, function, args, startTime) +} + +// testSSES3EncryptionPutGet tests SSE-S3 encryption +func testSSES3EncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back without any encryption headers + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + logSuccess(testName, function, args, startTime) + + } + + logSuccess(testName, function, args, startTime) +} + +// TestSSES3EncryptionFPut tests server side encryption +func testSSES3EncryptionFPut() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + os.Remove(fileName) + } + + logSuccess(testName, function, args, startTime) +} + +func testBucketNotification() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "SetBucketNotification(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + + if os.Getenv("NOTIFY_BUCKET") == "" || + os.Getenv("NOTIFY_SERVICE") == "" || + os.Getenv("NOTIFY_REGION") == "" || + os.Getenv("NOTIFY_ACCOUNTID") == "" || + os.Getenv("NOTIFY_RESOURCE") == "" { + logIgnored(testName, function, args, startTime, "Skipped notification test as it is not configured") + return + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + bucketName := os.Getenv("NOTIFY_BUCKET") + args["bucketName"] = bucketName + + topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + + topicConfig := notification.NewConfig(topicArn) + topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) + topicConfig.AddFilterSuffix("jpg") + + queueConfig := notification.NewConfig(queueArn) + queueConfig.AddEvents(notification.ObjectCreatedAll) + queueConfig.AddFilterPrefix("photos/") + + config := notification.Configuration{} + config.AddTopic(topicConfig) + + // Add the same topicConfig again, should have no effect + // because it is duplicated + config.AddTopic(topicConfig) + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Duplicate entry added", err) + return + } + + // Add and remove a queue config + config.AddQueue(queueConfig) + config.RemoveQueueByArn(queueArn) + + err = c.SetBucketNotification(context.Background(), bucketName, config) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) + return + } + + config, err = c.GetBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) + return + } + + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Topic config is empty", err) + return + } + + if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) + return + } + + err = c.RemoveAllBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests comprehensive list of all methods. +func testFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctional()" + functionAll := "" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket. + function = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" + args["bucketName"] = bucketName + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + + defer cleanupBucket(bucketName, c) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File creation failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "File write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. + var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find the bucket", err) + return + } + + // Asserting the default bucket policy. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + if nilPolicy != "" { + logError(testName, function, args, startTime, "", "policy should be set to nil", err) + return + } + + // Set the bucket policy to 'public readonly'. + function = "SetBucketPolicy(bucketName, readOnlyPolicy)" + functionAll += ", " + function + + readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readOnlyPolicy, + } + + err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readonly`. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public writeonly'. + function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" + functionAll += ", " + function + + writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": writeOnlyPolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `writeonly`. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public read/write'. + function = "SetBucketPolicy(bucketName, readWritePolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readwrite`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets(context.Background()) + + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("f"), 1<<19) + + function = "PutObject(bucketName, objectName, reader, contentType)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-nolength", + "contentType": "binary/octet-stream", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. + + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + objFound = false + isRecursive = true // Recursive is true. + function = "ListObjects()" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + incompObjNotFound := true + + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) + return + } + newReader.Close() + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject failed", err) + return + } + + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport := createHTTPTransport() + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) + return + } + + // Verify if presigned url works. + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + resp.Body.Close() + + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedGetObject success", err) + return + } + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err := io.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + resp.Body.Close() + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + + // Set request parameters. + reqParams := make(url.Values) + reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + "reqParams": reqParams, + } + presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err = io.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) + return + } + if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedPutObject success", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + buf = bytes.Repeat([]byte("g"), 1<<19) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) + return + } + + newReadBytes, err = io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err != nil { + logError(testName, function, args, startTime, "", "Presigned failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + req.Header.Add("mysecret", "abcxxx") + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err) + return + } + + newReadBytes, err = io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err) + return + } + + function = "RemoveObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + args["objectName"] = objectName + "-f" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-nolength" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presigned" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presign-custom" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + function = "RemoveBucket(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + err = c.RemoveBucket(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + err = c.RemoveBucket(context.Background(), bucketName) + if err == nil { + logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) + return + } + if err.Error() != "The specified bucket does not exist" { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + logSuccess(testName, functionAll, args, startTime) +} + +// Test for validating GetObject Reader* methods functioning when the +// object is modified in the object store. +func testGetObjectModified() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. + objectName := "myobject" + args["objectName"] = objectName + content := "helloworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) + return + } + defer reader.Close() + + // Read a few bytes of the object. + b := make([]byte, 5) + n, err := reader.ReadAt(b, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) + return + } + + // Upload different contents to the same object while object is being read. + newContent := "goodbyeworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + // Confirm that a Stat() call in between doesn't change the Object's cached etag. + _, err = reader.Stat() + expectedError := "At least one of the pre-conditions you specified did not hold." + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + // Read again only to find object contents have been modified since last read. + _, err = reader.ReadAt(b, int64(n)) + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test validates putObject to upload a file seeked at a given offset. +func testPutObjectUploadSeekedObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, fileToUpload, contentType)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileToUpload": "", + "contentType": "binary/octet-stream", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + var tempfile *os.File + + if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { + tempfile, err = os.Open(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + args["fileToUpload"] = fileName + } else { + tempfile, err = os.CreateTemp("", "minio-go-upload-test-") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile create failed", err) + return + } + args["fileToUpload"] = tempfile.Name() + + // Generate 100kB data + if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { + logError(testName, function, args, startTime, "", "File copy failed", err) + return + } + + defer os.Remove(tempfile.Name()) + + // Seek back to the beginning of the file. + tempfile.Seek(0, 0) + } + length := 100 * humanize.KiByte + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + offset := length / 2 + if _, err = tempfile.Seek(int64(offset), 0); err != nil { + logError(testName, function, args, startTime, "", "TempFile seek failed", err) + return + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + tempfile.Close() + + obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer obj.Close() + + n, err := obj.Seek(int64(offset), 0) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != int64(offset) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) + return + } + + _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(length-offset) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests bucket re-create errors. +func testMakeBucketErrorV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + region := "eu-west-1" + args["bucketName"] = bucketName + args["region"] = region + + // Make a new bucket in 'eu-west-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != minio.BucketAlreadyExists && + minio.ToErrorResponse(err).Code != minio.BucketAlreadyOwnedByYou { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwiceV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests FPutObject hidden contentType setting +func testFPutObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Make a temp file with 11*1024*1024 bytes of data. + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + + r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) + n, err := io.CopyN(file, r, 11*1024*1024) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if n != int64(11*1024*1024) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return + } + + // Close the file pro-actively for windows. + err = file.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File close failed", err) + return + } + + // Set base object name + objectName := bucketName + "FPutObject" + args["objectName"] = objectName + args["fileName"] = file.Name() + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/octet-stream) + args["objectName"] = objectName + "-Octet" + args["contentType"] = "" + + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Add extension to temp file name + fileName := file.Name() + err = os.Rename(fileName, fileName+".gtar") + if err != nil { + logError(testName, function, args, startTime, "", "Rename failed", err) + return + } + + // Perform FPutObject with no contentType provided (Expecting application/x-gtar) + args["objectName"] = objectName + "-Octet" + args["contentType"] = "" + args["fileName"] = fileName + ".gtar" + + _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + // Check headers and sizes + rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if rStandard.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } + + if rStandard.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) + return + } + + rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rOctet.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) + return + } + + if rOctet.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } + + rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if rGTar.Size != 11*1024*1024 { + logError(testName, function, args, startTime, "", "Unexpected size", nil) + return + } + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" { + logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-tar , got "+rGTar.ContentType, err) + return + } + + os.Remove(fileName + ".gtar") + logSuccess(testName, function, args, startTime) +} + +// Tests various bucket supported formats. +func testMakeBucketRegionsV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil { + args["bucketName"] = bucketName + ".withperiod" + args["region"] = "us-west-2" + logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctionalV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data. + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return + } + + offset := int64(2048) + n, err := r.Seek(offset, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != offset { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) + return + } + n, err = r.Seek(0, 1) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != offset { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) + return + } + _, err = r.Seek(offset, 2) + if err == nil { + logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) + return + } + n, err = r.Seek(-offset, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != st.Size-offset { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err) + return + } + + var buffer1 bytes.Buffer + if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + } + if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + + // Seek again and read again. + n, err = r.Seek(offset-1, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Seek failed", err) + return + } + if n != (offset - 1) { + logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err) + return + } + + var buffer2 bytes.Buffer + if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + } + // Verify now lesser bytes. + if !bytes.Equal(buf[2047:], buffer2.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctionalV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + + offset := int64(2048) + + // Read directly + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + m, err := r.ReadAt(buf2, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err) + return + } + if !bytes.Equal(buf2, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf3, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf3) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err) + return + } + if !bytes.Equal(buf3, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + offset += 512 + m, err = r.ReadAt(buf4, offset) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + if m != len(buf4) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, bufSize) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, bufSize+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests copy object +func testCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + args["source"] = src + + // Set copy conditions. + dst := minio.CopyDestOptions{ + Bucket: bucketName + "-copy", + Object: objectName + "-copy", + } + args["destination"] = dst + + // Perform the Copy + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Source object + r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + // Check the various fields of source object against destination object. + objInfo, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + objInfoCopy, err := readerCopy.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if objInfo.Size != objInfoCopy.Size { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err) + return + } + + // Close all the readers. + r.Close() + readerCopy.Close() + + // CopyObject again but with wrong conditions + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, + } + + // Perform the Copy which should fail + _, err = c.CopyObject(context.Background(), dst, src) + if err == nil { + logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests copy object with various checksum scenarios, tries to not repeat CopyObjectV2 test and +// instead just focus on Checksum. +func testCopyObjectWithChecksums() { + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectWithChecksums(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // PutObject to upload the object to the bucket, this object will have a Crc64NVME checksum applied + // by default since nothing was explicitly specified. + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // GetObject to obtain the eTag + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy source options + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + + tests := []struct { + csType minio.ChecksumType + cs wantChecksums + }{ + {csType: minio.ChecksumCRC64NVME, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + {csType: minio.ChecksumCRC32C, cs: wantChecksums{minio.ChecksumCRC32C: "aHnJMw=="}}, + {csType: minio.ChecksumCRC32, cs: wantChecksums{minio.ChecksumCRC32: "tIZ8hA=="}}, + {csType: minio.ChecksumSHA1, cs: wantChecksums{minio.ChecksumSHA1: "6YIIbcWH1iLaCFqs5vwq5Rwvm+o="}}, + {csType: minio.ChecksumSHA256, cs: wantChecksums{minio.ChecksumSHA256: "GKeJTopbMGPs3h4fAw4oe0R2QnnmFVJeIWkqCkp28Yo="}}, + // In S3, all copied objects without checksums and specified destination checksum algorithms + // automatically gain a CRC-64NVME checksum algorithm. Use ChecksumNone for this case. + {csType: minio.ChecksumNone, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + } + + for _, test := range tests { + args := map[string]interface{}{} + args["srcOpts"] = src + args["section"] = "setup" + args["checksum"] = test.csType.String() + + // Copy destination options + bucketCopyName := bucketName + "-copy" + objectCopyName := objectName + "-copy-" + test.csType.String() + dst := minio.CopyDestOptions{ + Bucket: bucketCopyName, + Object: objectCopyName, + ReplaceMetadata: true, + } + if test.csType != minio.ChecksumNone { + // Request the server-side checksum on the copy. + // ChecksumNone is a flag to leave off the header + dst.ChecksumType = test.csType + } + args["destOpts"] = dst + + // Perform the Copy + args["section"] = "CopyObject" + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Checksum verification + args["section"] = "HeadObject" + st, err := c.StatObject(context.Background(), bucketCopyName, objectCopyName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.ChecksumMode != "FULL_OBJECT" { + logError(testName, function, args, startTime, "", "ChecksumMode want: FULL_OBJECT, got "+st.ChecksumMode, nil) + return + } + err = cmpChecksum(st, test.cs) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch", err) + return + } + + logSuccess(testName, function, args, startTime) + } +} + +// Tests replacing an object with CopyObject and a new Checksum type +func testReplaceObjectWithChecksums() { + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectWithChecksums(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + tests := []struct { + csType minio.ChecksumType + cs wantChecksums + }{ + {csType: minio.ChecksumCRC64NVME, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + {csType: minio.ChecksumCRC32C, cs: wantChecksums{minio.ChecksumCRC32C: "aHnJMw=="}}, + {csType: minio.ChecksumCRC32, cs: wantChecksums{minio.ChecksumCRC32: "tIZ8hA=="}}, + {csType: minio.ChecksumSHA1, cs: wantChecksums{minio.ChecksumSHA1: "6YIIbcWH1iLaCFqs5vwq5Rwvm+o="}}, + {csType: minio.ChecksumSHA256, cs: wantChecksums{minio.ChecksumSHA256: "GKeJTopbMGPs3h4fAw4oe0R2QnnmFVJeIWkqCkp28Yo="}}, + // In S3, all copied objects without checksums and specified destination checksum algorithms + // automatically gain a CRC-64NVME checksum algorithm. Use ChecksumNone for this case. + {csType: minio.ChecksumNone, cs: wantChecksums{minio.ChecksumCRC64NVME: "iRtfQH3xflQ="}}, + } + + for _, test := range tests { + args := map[string]interface{}{} + args["section"] = "setup" + args["destOpts"] = "" + args["checksum"] = test.csType.String() + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // PutObject to upload the object to the bucket + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // GetObject to obtain the eTag + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy source options + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + + // Copy destination options, overwrite the existing object + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + // S3 requires that we send some new metadata otherwise it complains that the + // CopyObject is illegal. + UserMetadata: map[string]string{ + "TestMeta": objectName + "-meta-" + test.csType.String(), + }, + ReplaceMetadata: true, + } + if test.csType != minio.ChecksumNone { + // Request the server-side checksum on the copy. + // ChecksumNone is a flag to leave off the header + dst.ChecksumType = test.csType + } + args["destOpts"] = dst + + // Perform the Copy + args["section"] = "CopyObject" + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Checksum verification + args["section"] = "HeadObject" + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.ChecksumMode != "FULL_OBJECT" { + logError(testName, function, args, startTime, "", "ChecksumMode want: FULL_OBJECT, got "+st.ChecksumMode, nil) + return + } + err = cmpChecksum(st, test.cs) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch", err) + return + } + + logSuccess(testName, function, args, startTime) + } +} + +func testComposeObjectErrorCasesWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Test that more than 10K source objects cannot be + // concatenated. + srcArr := [10001]minio.CopySrcOptions{} + srcSlice := srcArr[:] + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "object", + } + + args["destination"] = dst + // Just explain about srcArr in args["sourceList"] + // to stop having 10,001 null headers logged + args["sourceList"] = "source array of 10,001 elements" + if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil { + logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err) + return + } else if err.Error() != "There must be as least one and up to 10000 source objects." { + logError(testName, function, args, startTime, "", "Got unexpected error", err) + return + } + + // Create a source with invalid offset spec and check that + // error is returned: + // 1. Create the source object. + const badSrcSize = 5 * 1024 * 1024 + buf := bytes.Repeat([]byte("1"), badSrcSize) + _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // 2. Set invalid range spec on the object (going beyond + // object size) + badSrc := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "badObject", + MatchRange: true, + Start: 1, + End: badSrcSize, + } + + // 3. ComposeObject call should fail. + if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { + logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) + return + } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { + logError(testName, function, args, startTime, "", "Got invalid error", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test expected error cases +func testComposeObjectErrorCasesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +func testComposeMultipleSources(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{ + "destination": "", + "sourceList": "", + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload a small source object + const srcSize = 1024 * 1024 * 5 + buf := bytes.Repeat([]byte("1"), srcSize) + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // We will append 10 copies of the object. + srcs := []minio.CopySrcOptions{} + for i := 0; i < 10; i++ { + srcs = append(srcs, minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + }) + } + + // make the last part very small + srcs[9].MatchRange = true + + args["sourceList"] = srcs + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + } + args["destination"] = dst + + ui, err := c.ComposeObject(context.Background(), dst, srcs...) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + if ui.Size != 9*srcSize+1 { + logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err) + return + } + + objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objProps.Size != 9*srcSize+1 { + logError(testName, function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test concatenating multiple 10K objects V2 +func testCompose10KSourcesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + testComposeMultipleSources(c) +} + +func testEncryptedEmptyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 0 + var buf []byte // Empty buffer + args["objectName"] = "object" + _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + // 2. Test CopyObject for an empty object + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "object", + Encryption: sse, + } + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, + } + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) + return + } + + // 3. Test Key rotation + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: newSSE, + } + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) + return + } + + // 4. Download the object. + reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) + return + } + + delete(args, "objectName") + logSuccess(testName, function, args, startTime) +} + +func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { + // initialize logging params + startTime := time.Now() + testName := getFuncNameLoc(2) + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + args["testName"] = testName + var srcEncryption, dstEncryption encrypt.ServerSide + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + + // Calculate the CRC32C checksum for the object + meta := map[string]string{} + h := minio.ChecksumCRC32C.Hasher() + h.Reset() + h.Write(buf) + meta[minio.ChecksumCRC32C.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + DisableMultipart: true, + DisableContentSha256: true, + UserMetadata: meta, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + if sseSrc != nil && sseSrc.Type() != encrypt.S3 { + srcEncryption = sseSrc + } + + // 2. copy object and change encryption key + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: srcEncryption, + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + Encryption: sseDst, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + if sseDst != nil && sseDst.Type() != encrypt.S3 { + dstEncryption = sseDst + } + // 3. get copied object and check if content is equal + coreClient := minio.Core{Client: c} + reader, oi, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption, Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + err = cmpChecksum(oi, wantChecksums{minio.ChecksumCRC32C: "bSoobA=="}) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch on dstObject", err) + return + } + + // Test key rotation for source object in-place. + var newSSE encrypt.ServerSide + if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { + newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key + } + if sseSrc != nil && sseSrc.Type() == encrypt.S3 { + newSSE = encrypt.NewSSE() + } + if newSSE != nil { + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Get copied object and check if content is equal + reader, oi, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE, Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err = io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + err = cmpChecksum(oi, wantChecksums{minio.ChecksumCRC32C: "bSoobA=="}) + if err != nil { + fmt.Printf("srcObject objectInfo: %+v\n", oi) + logError(testName, function, args, startTime, "", "Checksum mismatch on srcObject for in-place", err) + return + } + + // Test in-place decryption. + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", + } + args["destination"] = dst + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, + } + args["source"] = src + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) + return + } + } + + // Get copied decrypted object and check if content is equal + reader, oi, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err = io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + + err = cmpChecksum(oi, wantChecksums{minio.ChecksumCRC32C: "bSoobA=="}) + if err != nil { + logError(testName, function, args, startTime, "", "Checksum mismatch for decrypted object", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test encrypted copy object +func testUnencryptedToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc encrypt.ServerSide + sseDst := encrypt.NewSSE() + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testUnencryptedToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + var sseSrc, sseDst encrypt.ServerSide + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.NewSSE() + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSECToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + var sseDst encrypt.ServerSide + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSECCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToSSES3CopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + sseDst := encrypt.NewSSE() + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedSSES3ToUnencryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.NewSSE() + var sseDst encrypt.ServerSide + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +// Test encrypted copy object +func testEncryptedCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true, TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) + sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) +} + +func testDecryptedCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ + ServerSideEncryption: encryption, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + Encryption: encrypt.SSECopy(encryption), + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "decrypted-" + objectName, + } + args["destination"] = dst + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + logSuccess(testName, function, args, startTime) +} + +func testSSECMultipartEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 6MB of data + buf := bytes.Repeat([]byte("abcdef"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + // Upload a 6MB object using multipart mechanism + uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + var completeParts []minio.CompletePart + + part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, + bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, + minio.PutObjectPartOptions{SSE: srcencryption}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, + bytes.NewReader(buf[5*1024*1024:]), 1024*1024, + minio.PutObjectPartOptions{SSE: srcencryption}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (6*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 6*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 6*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) + return + } + + getOpts.SetRange(6*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 6*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:6*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) + return + } + if getBuf[6*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation +func testSSECEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy +func testSSECEncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + var dstencryption encrypt.ServerSide + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy +func testSSECEncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part +func testUnencryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{TrailingHeaders: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testUnencryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testUnencryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part +func testSSES3EncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testSSES3EncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testSSES3EncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + client, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{Client: client} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. + metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. +} + +func testUserMetadataCopying() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + testUserMetadataCopyingWrapper(c) +} + +func testUserMetadataCopyingWrapper(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return h + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { + h.Add(k, vs[0]) + } + } + return h + } + + // 1. create a client encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + metadata := make(http.Header) + metadata.Set("x-amz-meta-myheader", "myvalue") + m := make(map[string]string) + m["x-amz-meta-myheader"] = "myvalue" + _, err = c.PutObject(context.Background(), bucketName, "srcObject", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) + return + } + if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 2. create source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + } + + // 2.1 create destination with metadata set + dst1 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-1", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, + } + + // 3. Check that copying to an object with metadata set resets + // the headers on the copy. + args["source"] = src + args["destination"] = dst1 + _, err = c.CopyObject(context.Background(), dst1, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders := make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 4. create destination with no metadata set and same source + dst2 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-2", + } + + // 5. Check that copying to an object with no metadata set, + // copies metadata. + args["source"] = src + args["destination"] = dst2 + _, err = c.CopyObject(context.Background(), dst2, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + expectedHeaders = metadata + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 6. Compose a pair of sources. + dst3 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-3", + ReplaceMetadata: true, + } + + function = "ComposeObject(destination, sources)" + args["source"] = []minio.CopySrcOptions{src, src} + args["destination"] = dst3 + _, err = c.ComposeObject(context.Background(), dst3, src, src) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that no headers are copied in this case + if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + // 7. Compose a pair of sources with dest user metadata set. + dst4 := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject-4", + UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, + ReplaceMetadata: true, + } + + function = "ComposeObject(destination, sources)" + args["source"] = []minio.CopySrcOptions{src, src} + args["destination"] = dst4 + _, err = c.ComposeObject(context.Background(), dst4, src, src) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Check that no headers are copied in this case + expectedHeaders = make(http.Header) + expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") + if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testUserMetadataCopyingV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + testUserMetadataCopyingWrapper(c) +} + +func testStorageClassMetadataPutObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassMetadataPutObject()" + args := map[string]interface{}{} + testName := getFuncName() + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return h + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + metadata := make(http.Header) + metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") + + emptyMetadata := make(http.Header) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB + + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Get the returned metadata + returnedMeta := fetchMeta("srcObjectRRSClass") + + // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) + if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testStorageClassInvalidMetadataPutObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassInvalidMetadataPutObject()" + args := map[string]interface{}{} + testName := getFuncName() + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB + + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testStorageClassMetadataCopyObject() { + // initialize logging params + startTime := time.Now() + function := "testStorageClassMetadataCopyObject()" + args := map[string]interface{}{} + testName := getFuncName() + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + fetchMeta := func(object string) (h http.Header) { + objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) + args["bucket"] = bucketName + args["object"] = object + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return h + } + h = make(http.Header) + for k, vs := range objInfo.Metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { + for _, v := range vs { + h.Add(k, v) + } + } + } + return h + } + + metadata := make(http.Header) + metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") + + emptyMetadata := make(http.Header) + + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) + + // Put an object with RRS Storage class + _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectRRSClass", + } + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectRRSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err) + return + } + + // Get the returned metadata + returnedMeta := fetchMeta("srcObjectRRSClassCopy") + + // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) + if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + // Put an object with Standard Storage class + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectSSClass", + } + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectSSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) + return + } + // Fetch the meta data of copied object + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test put object with size -1 byte object. +func testPutObjectNoLengthV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": -1, + "opts": "", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + args["size"] = bufSize + + // Upload an object. + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(st.Size), err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test put objects of unknown size. +func testPutObjectsUnknownV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Issues are revealed by trying to upload multiple files of unknown size + // sequentially (on 4GB machines) + for i := 1; i <= 4; i++ { + // Simulate that we could be receiving byte slices of data that we want + // to upload as a file + rpipe, wpipe := io.Pipe() + defer rpipe.Close() + go func() { + b := []byte("test") + wpipe.Write(b) + wpipe.Close() + }() + + // Upload the object. + objectName := fmt.Sprintf("%sunique%d", bucketName, i) + args["objectName"] = objectName + + ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != 4 { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(ui.Size), nil) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err) + return + } + + if st.Size != int64(4) { + logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(st.Size), err) + return + } + + } + + logSuccess(testName, function, args, startTime) +} + +// Test put object with 0 byte object. +func testPutObject0ByteV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": 0, + "opts": "", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + args["opts"] = minio.PutObjectOptions{} + + // Upload an object. + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err) + return + } + if st.Size != 0 { + logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(st.Size), err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test put object with 0 byte object with non-US-ASCII characters. +func testPutObjectMetadataNonUSASCIIV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": 0, + "opts": "", + } + metadata := map[string]string{ + "test-zh": "你好", + "test-ja": "こんにちは", + "test-ko": "안녕하세요", + "test-ru": "Здравствуй", + "test-de": "Hallo", + "test-it": "Ciao", + "test-pt": "Olá", + "test-ar": "مرحبا", + "test-hi": "नमस्ते", + "test-hu": "Helló", + "test-ro": "Bună", + "test-be": "Прывiтанне", + "test-sl": "Pozdravljen", + "test-sr": "Здраво", + "test-bg": "Здравейте", + "test-uk": "Привіт", + } + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + args["opts"] = minio.PutObjectOptions{} + + // Upload an object. + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{ + UserMetadata: metadata, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err) + return + } + if st.Size != 0 { + logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(st.Size), err) + return + } + + for k, v := range metadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { + continue + } + if st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)) != v { + logError(testName, function, args, startTime, "", "Expected upload object metadata "+k+": "+v+" but got "+st.Metadata.Get(http.CanonicalHeaderKey("X-Amz-Meta-"+k)), err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Test expected error cases +func testComposeObjectErrorCases() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +// Test concatenating multiple 10K objects V4 +func testCompose10KSources() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + testComposeMultipleSources(c) +} + +// Tests comprehensive list of all methods. +func testFunctionalV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctionalV2()" + functionAll := "" + args := map[string]interface{}{} + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + location := "us-east-1" + // Make a new bucket. + function = "MakeBucket(bucketName, location)" + functionAll = "MakeBucket(bucketName, location)" + args = map[string]interface{}{ + "bucketName": bucketName, + "location": location, + } + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. + var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err) + return + } + + // Make the bucket 'public read/write'. + function = "SetBucketPolicy(bucketName, bucketPolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets(context.Background()) + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) + return + } + + objectNameNoLength := objectName + "-nolength" + args["objectName"] = objectNameNoLength + _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) + return + } + + incompObjNotFound := true + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FgetObject failed", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + + // Verify if presigned url works. + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "Got empty ETag", err) + return + } + resp.Body.Close() + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err := io.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + resp.Body.Close() + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + // Set request parameters. + reqParams := make(url.Values) + reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") + // Generate presigned GET object url. + args["reqParams"] = reqParams + presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. + req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) + return + } + + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) + return + } + newPresignedBytes, err = io.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(newPresignedBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + // Verify content disposition. + if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err) + return + } + + newReadBytes, err = io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err == nil { + logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + logSuccess(testName, functionAll, args, startTime) +} + +// Test get object with GetObject with context +func testGetObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + cancel() + + r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) + return + } + + if _, err = r.Stat(); err == nil { + logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) + return + } + r.Close() + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + + // Read the data back + r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "object Stat call failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "object Close() call failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object with FGetObject with a user provided context +func testFGetObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FGetObject(ctx, bucketName, objectName, fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-1-MB"] + reader := getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + fileName := "tempfile-context" + args["fileName"] = fileName + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) + return + } + if err = os.Remove(fileName + "-fcontext"); err != nil { + logError(testName, function, args, startTime, "", "Remove file failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object with GetObject with a user provided context +func testGetObjectRanges() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName, fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + rng := rand.NewSource(time.Now().UnixNano()) + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rng, "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rng, "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + tests := []struct { + start int64 + end int64 + }{ + { + start: 1024, + end: 1024 + 1<<20, + }, + { + start: 20e6, + end: 20e6 + 10000, + }, + { + start: 40e6, + end: 40e6 + 10000, + }, + { + start: 60e6, + end: 60e6 + 10000, + }, + { + start: 80e6, + end: 80e6 + 10000, + }, + { + start: 120e6, + end: int64(bufSize), + }, + } + for _, test := range tests { + wantRC := getDataReader("datafile-129-MB") + io.CopyN(io.Discard, wantRC, test.start) + want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) + opts := minio.GetObjectOptions{} + opts.SetRange(test.start, test.end) + args["opts"] = fmt.Sprintf("%+v", test) + obj, err := c.GetObject(ctx, bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) + return + } + err = crcMatches(obj, want) + if err != nil { + logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object ACLs with GetObjectACL with custom provided context +func testGetObjectACLContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObjectACL(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-1-MB"] + reader := getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Add meta data to add a canned acl + metaData := map[string]string{ + "X-Amz-Acl": "public-read-write", + } + + _, err = c.PutObject(context.Background(), bucketName, + objectName, reader, int64(bufSize), + minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + UserMetadata: metaData, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + args["ctx"] = ctx + defer cancel() + + // Read the data back + objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName) + if getObjectACLErr != nil { + logError(testName, function, args, startTime, "", "GetObjectACL failed. ", getObjectACLErr) + return + } + + s, ok := objectInfo.Metadata["X-Amz-Acl"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + // Do a very limited testing if this is not AWS S3 + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + if s[0] != "private" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got"+fmt.Sprintf("%q", s[0]), nil) + return + } + + logSuccess(testName, function, args, startTime) + return + } + + if s[0] != "public-read-write" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil) + return + } + + bufSize = dataFileMap["datafile-1-MB"] + reader2 := getDataReader("datafile-1-MB") + defer reader2.Close() + // Save the data + objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Add meta data to add a canned acl + metaData = map[string]string{ + "X-Amz-Grant-Read": "id=fooread@minio.go", + "X-Amz-Grant-Write": "id=foowrite@minio.go", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + args["ctx"] = ctx + defer cancel() + + // Read the data back + objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName) + if getObjectACLErr == nil { + logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) + return + } + + if len(objectInfo.Metadata) != 3 { + logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Read"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "fooread@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "foowrite@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test validates putObject with context to see if request cancellation is honored for V2. +func testPutObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "size": "", + "opts": "", + } + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + bufSize := dataFileMap["datatfile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + args["ctx"] = ctx + args["size"] = bufSize + defer cancel() + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object with GetObject with custom context +func testGetObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + cancel() + + r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) + return + } + if _, err = r.Stat(); err == nil { + logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) + return + } + r.Close() + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + // Read the data back + r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "object Stat call failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", " object Close() call failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object with FGetObject with custom context +func testFGetObjectContextV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FGetObject(ctx, bucketName, objectName,fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + + c, err := NewClient(ClientConfig{CredsV2: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket call failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datatfile-1-MB"] + reader := getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + fileName := "tempfile-context" + args["fileName"] = fileName + + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + defer cancel() + + // Read the data back + err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err) + return + } + + if err = os.Remove(fileName + "-fcontext"); err != nil { + logError(testName, function, args, startTime, "", "Remove file failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test list object v1 and V2 +func testListObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testObjects := []struct { + name string + storageClass string + }{ + // Special characters + {"foo bar", "STANDARD"}, + {"foo-%", "STANDARD"}, + {"random-object-1", "STANDARD"}, + {"random-object-2", "REDUCED_REDUNDANCY"}, + } + + for i, object := range testObjects { + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize), + minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass}) + if err != nil { + logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err) + return + } + } + + testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) { + var objCursor int + + // check for object name and storage-class from listing object result + for objInfo := range listFn(context.Background(), bucket, opts) { + if objInfo.Err != nil { + logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err) + return + } + if objInfo.Key != testObjects[objCursor].name { + logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err) + return + } + if objInfo.StorageClass != testObjects[objCursor].storageClass { + // Ignored as Gateways (Azure/GCS etc) wont return storage class + logIgnored(testName, function, args, startTime, "ListObjects doesn't return expected storage class") + } + objCursor++ + } + + if objCursor != len(testObjects) { + logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("")) + return + } + } + + testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true}) + testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true}) + testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true}) + + logSuccess(testName, function, args, startTime) +} + +// testCors is runnable against S3 itself. +// Just provide the env var MINIO_GO_TEST_BUCKET_CORS with bucket that is public and WILL BE DELETED. +// Recreate this manually each time. Minio-go SDK does not support calling +// SetPublicBucket (put-public-access-block) on S3, otherwise we could script the whole thing. +func testCors() { + ctx := context.Background() + startTime := time.Now() + testName := getFuncName() + function := "SetBucketCors(bucketName, cors)" + args := map[string]interface{}{ + "bucketName": "", + "cors": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Create or reuse a bucket that will get cors settings applied to it and deleted when done + bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS") + if bucketName == "" { + bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + } + args["bucketName"] = bucketName + defer cleanupBucket(bucketName, c) + + publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}` + err = c.SetBucketPolicy(ctx, bucketName, publicPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + + // Upload an object for testing. + objectContents := `some-text-file-contents` + reader := strings.NewReader(objectContents) + bufSize := int64(len(objectContents)) + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + bucketURL := c.EndpointURL().String() + "/" + bucketName + "/" + objectURL := bucketURL + objectName + + httpClient := &http.Client{ + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + + errStrAccessForbidden := `AccessForbiddenCORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted` + testCases := []struct { + name string + + // Cors rules to apply + applyCorsRules []cors.Rule + + // Outbound request info + method string + url string + headers map[string]string + + // Wanted response + wantStatus int + wantHeaders map[string]string + wantBodyContains string + }{ + { + name: "apply bucket rules", + applyCorsRules: []cors.Rule{ + { + AllowedOrigin: []string{"https"}, // S3 documents 'https' origin, but it does not actually work, see test below. + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + }, + { + AllowedOrigin: []string{"http://www.example1.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + ExposeHeader: []string{"x-amz-server-side-encryption", "x-amz-request-id"}, + MaxAgeSeconds: 3600, + }, + { + AllowedOrigin: []string{"http://www.example2.com"}, + AllowedMethod: []string{"POST"}, + AllowedHeader: []string{"X-My-Special-Header"}, + ExposeHeader: []string{"X-AMZ-Request-ID"}, + }, + { + AllowedOrigin: []string{"http://www.example3.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"X-Example-3-Special-Header"}, + MaxAgeSeconds: 10, + }, + { + AllowedOrigin: []string{"*"}, + AllowedMethod: []string{"GET"}, + AllowedHeader: []string{"*"}, + ExposeHeader: []string{"x-amz-request-id", "X-AMZ-server-side-encryption"}, + MaxAgeSeconds: 3600, + }, + { + AllowedOrigin: []string{"http://multiplemethodstest.com"}, + AllowedMethod: []string{"POST", "PUT", "DELETE"}, + AllowedHeader: []string{"x-abc-*", "x-def-*"}, + }, + { + AllowedOrigin: []string{"http://UPPERCASEEXAMPLE.com"}, + AllowedMethod: []string{"DELETE"}, + }, + { + AllowedOrigin: []string{"https://*"}, + AllowedMethod: []string{"DELETE"}, + AllowedHeader: []string{"x-abc-*", "x-def-*"}, + }, + }, + }, + { + name: "preflight to object url matches example1 rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "", + }, + }, + { + name: "preflight to bucket url matches example1 rule", + method: http.MethodOptions, + url: bucketURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + }, + }, + { + name: "preflight matches example2 rule with header given", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example2.com", + "Access-Control-Request-Method": "POST", + "Access-Control-Request-Headers": "X-My-Special-Header", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example2.com", + "Access-Control-Allow-Methods": "POST", + "Access-Control-Allow-Headers": "x-my-special-header", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Content-Length": "0", + }, + }, + { + name: "preflight matches example2 rule with no header given", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example2.com", + "Access-Control-Request-Method": "POST", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example2.com", + "Access-Control-Allow-Methods": "POST", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Content-Length": "0", + }, + }, + { + name: "preflight matches wildcard origin rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.couldbeanything.com", + "Access-Control-Request-Method": "GET", + "Access-Control-Request-Headers": "x-custom-header,x-other-custom-header", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Headers": "x-custom-header,x-other-custom-header", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + }, + }, + { + name: "preflight does not match any rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.couldbeanything.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight does not match example1 rule because of method", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "POST", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "s3 processes cors rules even when request is not preflight if cors headers present test get", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Request-Method": "PUT", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + // "Access-Control-Allow-Methods": "PUT", + // "Access-Control-Max-Age": "3600", + }, + }, + { + name: "s3 processes cors rules even when request is not preflight if cors headers present test put", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "GET", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Origin": "*", + "Access-Control-Expose-Headers": "x-amz-request-id,x-amz-server-side-encryption", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + // "Access-Control-Allow-Methods": "PUT", + // "Access-Control-Max-Age": "3600", + }, + }, + { + name: "s3 processes cors rules even when request is not preflight but there is no rule match", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "get request matches wildcard origin rule and returns cors headers", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-request-id,X-AMZ-server-side-encryption", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "GET", + }, + }, + { + name: "head request does not match rule and returns no cors headers", + method: http.MethodHead, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.nomatchingdomainfound.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put request with origin does not match rule and returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.nomatchingdomainfound.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put request with no origin does not match rule and returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{}, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request with wildcard origin does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.notsecureexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight for delete request with wildcard https origin matches secureexample", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "https://www.secureexample.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request matches secureexample with wildcard https origin and request headers", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "https://www.secureexample.com", + "Access-Control-Allow-Headers": "x-abc-1,x-abc-second,x-def-1", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request matches secureexample rejected because request header does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1,x-does-not-match", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight with https origin is documented by s3 as matching but it does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.securebutdoesnotmatch.com", + "Access-Control-Request-Method": "PUT", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "put no origin no match returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{}, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put with origin match example1 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "put with origin and header match example1 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "x-could-be-anything": "myvalue", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "put no match found returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.unmatchingdomain.com", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put with origin match example3 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example3.com", + "X-My-Special-Header": "myvalue", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example3.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "10", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "preflight matches example1 rule headers case is incorrect", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + // Fetch standard guarantees that these are sent lowercase, here we test what happens when they are not. + "Access-Control-Request-Headers": "X-Another-Header,X-Could-Be-Anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + }, + }, + { + name: "preflight matches example1 rule headers are not sorted", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + // Fetch standard guarantees that these are sorted, test what happens when they are not. + "Access-Control-Request-Headers": "a-customer-header,b-should-be-last", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "a-customer-header,b-should-be-last", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + }, + }, + { + name: "preflight with case sensitivity in origin matches uppercase", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight with case sensitivity in origin does not match when lowercase", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://uppercaseexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight match upper case with unknown header but no header restrictions", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-unknown-1", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight for delete request matches multiplemethodstest.com origin and request headers", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://multiplemethodstest.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://multiplemethodstest.com", + "Access-Control-Allow-Headers": "x-abc-1", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + // S3 returns POST, PUT, DELETE here, MinIO does not as spec does not require it. + // "Access-Control-Allow-Methods": "DELETE", + }, + }, + { + name: "delete request goes ahead because cors is only for browsers and does not block on the server side", + method: http.MethodDelete, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.justrandom.com", + }, + wantStatus: http.StatusNoContent, + }, + } + + for i, test := range testCases { + testName := fmt.Sprintf("%s_%d_%s", testName, i+1, strings.ReplaceAll(test.name, " ", "_")) + + // Apply the CORS rules + if test.applyCorsRules != nil { + corsConfig := &cors.Config{ + CORSRules: test.applyCorsRules, + } + err = c.SetBucketCors(ctx, bucketName, corsConfig) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + return + } + } + + // Make request + if test.method != "" && test.url != "" { + req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request creation failed", err) + return + } + req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion) + + for k, v := range test.headers { + req.Header.Set(k, v) + } + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + defer resp.Body.Close() + + // Check returned status code + if resp.StatusCode != test.wantStatus { + errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode) + logError(testName, function, args, startTime, "", errStr, nil) + return + } + + // Check returned body + if test.wantBodyContains != "" { + body, err := io.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read response body", err) + return + } + if !strings.Contains(string(body), test.wantBodyContains) { + errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body)) + logError(testName, function, args, startTime, "", errStr, nil) + return + } + } + + // Check returned response headers + for k, v := range test.wantHeaders { + gotVal := resp.Header.Get(k) + if k == "Access-Control-Expose-Headers" { + // MinIO returns this in canonical form, S3 does not. + gotVal = strings.ToLower(gotVal) + v = strings.ToLower(v) + } + // Remove all spaces, S3 adds spaces after CSV values in headers, MinIO does not. + gotVal = strings.ReplaceAll(gotVal, " ", "") + if gotVal != v { + errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal) + logError(testName, function, args, startTime, "", errStr, nil) + return + } + } + } + logSuccess(testName, function, args, startTime) + } + logSuccess(testName, function, args, startTime) +} + +func testCorsSetGetDelete() { + ctx := context.Background() + startTime := time.Now() + testName := getFuncName() + function := "SetBucketCors(bucketName, cors)" + args := map[string]interface{}{ + "bucketName": "", + "cors": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Set the CORS rules on the new bucket + corsRules := []cors.Rule{ + { + AllowedOrigin: []string{"http://www.example1.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + }, + { + AllowedOrigin: []string{"http://www.example2.com"}, + AllowedMethod: []string{"POST"}, + AllowedHeader: []string{"X-My-Special-Header"}, + }, + { + AllowedOrigin: []string{"*"}, + AllowedMethod: []string{"GET"}, + AllowedHeader: []string{"*"}, + }, + } + corsConfig := cors.NewConfig(corsRules) + err = c.SetBucketCors(ctx, bucketName, corsConfig) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + return + } + + // Get the rules and check they match what we set + gotCorsConfig, err := c.GetBucketCors(ctx, bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketCors failed", err) + return + } + if !reflect.DeepEqual(corsConfig, gotCorsConfig) { + msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig) + logError(testName, function, args, startTime, "", msg, nil) + return + } + + // Delete the rules + err = c.SetBucketCors(ctx, bucketName, nil) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketCors failed to delete", err) + return + } + + // Get the rules and check they are now empty + gotCorsConfig, err = c.GetBucketCors(ctx, bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketCors failed", err) + return + } + if gotCorsConfig != nil { + logError(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test deleting multiple objects with object retention set in Governance mode +func testRemoveObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error uploading object", err) + return + } + + // Replace with smaller... + bufSize = dataFileMap["datafile-10-kB"] + reader = getDataReader("datafile-10-kB") + defer reader.Close() + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error uploading object", err) + } + + t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: false, + RetainUntilDate: &t, + Mode: &m, + } + err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "Error setting retention", err) + return + } + + objectsCh := make(chan minio.ObjectInfo) + // Send object names that are needed to be removed to objectsCh + go func() { + defer close(objectsCh) + // List all objects from a bucket-name with a matching prefix. + for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + logError(testName, function, args, startTime, "", "Error listing objects", object.Err) + return + } + objectsCh <- object + } + }() + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) { + // Error is expected here because Retention is set on the object + // and RemoveObjects is called without Bypass Governance + if rErr.Err == nil { + logError(testName, function, args, startTime, "", "Expected error during deletion", nil) + return + } + } + + objectsCh1 := make(chan minio.ObjectInfo) + + // Send object names that are needed to be removed to objectsCh + go func() { + defer close(objectsCh1) + // List all objects from a bucket-name with a matching prefix. + for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + logError(testName, function, args, startTime, "", "Error listing objects", object.Err) + return + } + objectsCh1 <- object + } + }() + + opts1 := minio.RemoveObjectsOptions{ + GovernanceBypass: true, + } + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test deleting multiple objects with object retention set in Governance mode, via iterators +func testRemoveObjectsIter() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error uploading object", err) + return + } + + // Replace with smaller... + bufSize = dataFileMap["datafile-10-kB"] + reader = getDataReader("datafile-10-kB") + defer reader.Close() + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error uploading object", err) + } + + t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC) + m := minio.RetentionMode(minio.Governance) + opts := minio.PutObjectRetentionOptions{ + GovernanceBypass: false, + RetainUntilDate: &t, + Mode: &m, + } + err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "Error setting retention", err) + return + } + + objectsIter := c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{ + WithVersions: true, + Recursive: true, + }) + results, err := c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Error sending delete request", err) + return + } + for result := range results { + if result.Err != nil { + // Error is expected here because Retention is set on the object + // and RemoveObjects is called without Bypass Governance + break + } + logError(testName, function, args, startTime, "", "Expected error during deletion", nil) + return + } + + objectsIter = c.ListObjectsIter(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) + results, err = c.RemoveObjectsWithIter(context.Background(), bucketName, objectsIter, minio.RemoveObjectsOptions{ + GovernanceBypass: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "Error sending delete request", err) + return + } + for result := range results { + if result.Err != nil { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", result.Err) + return + } + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get bucket tags +func testGetBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetBucketTagging(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { + logError(testName, function, args, startTime, "", "Invalid error from server failed", err) + return + } + + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test setting tags for bucket +func testSetBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "SetBucketTagging(bucketName, tags)" + args := map[string]interface{}{ + "bucketName": "", + "tags": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + tag := randString(60, rand.NewSource(time.Now().UnixNano()), "") + expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + t, err := tags.MapToBucketTags(map[string]string{ + tag: expectedValue, + }) + args["tags"] = t.String() + if err != nil { + logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err) + return + } + + err = c.SetBucketTagging(context.Background(), bucketName, t) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketTagging failed", err) + return + } + + tagging, err := c.GetBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketTagging failed", err) + return + } + + if tagging.ToMap()[tag] != expectedValue { + msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue) + logError(testName, function, args, startTime, "", msg, err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test removing bucket tags +func testRemoveBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveBucketTagging(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + tag := randString(60, rand.NewSource(time.Now().UnixNano()), "") + expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + t, err := tags.MapToBucketTags(map[string]string{ + tag: expectedValue, + }) + if err != nil { + logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err) + return + } + + err = c.SetBucketTagging(context.Background(), bucketName, t) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketTagging failed", err) + return + } + + tagging, err := c.GetBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketTagging failed", err) + return + } + + if tagging.ToMap()[tag] != expectedValue { + msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue) + logError(testName, function, args, startTime, "", msg, err) + return + } + + err = c.RemoveBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucketTagging failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != minio.NoSuchTagSet { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Convert string to bool and always return false if any error +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +// wantChecksums is a map of expected checksums for an object. +type wantChecksums map[minio.ChecksumType]string + +// cmpChecksum compares the checksums of an object against expected values. +func cmpChecksum(oi minio.ObjectInfo, chksums wantChecksums) error { + if oi.ChecksumCRC64NVME != chksums[minio.ChecksumCRC64NVME] { + return fmt.Errorf("Checksum mismatch for CRC64NVME, want: %s, got: %s", chksums[minio.ChecksumCRC64NVME], oi.ChecksumCRC64NVME) + } + if oi.ChecksumCRC32C != chksums[minio.ChecksumCRC32C] { + return fmt.Errorf("Checksum mismatch for CRC32C, want: %s, got: %s", chksums[minio.ChecksumCRC32C], oi.ChecksumCRC32C) + } + if oi.ChecksumCRC32 != chksums[minio.ChecksumCRC32] { + return fmt.Errorf("Checksum mismatch for CRC32, want: %s, got: %s", chksums[minio.ChecksumCRC32], oi.ChecksumCRC32) + } + if oi.ChecksumSHA1 != chksums[minio.ChecksumSHA1] { + return fmt.Errorf("Checksum mismatch for SHA1, want: %s, got: %s", chksums[minio.ChecksumSHA1], oi.ChecksumSHA1) + } + if oi.ChecksumSHA256 != chksums[minio.ChecksumSHA256] { + return fmt.Errorf("Checksum mismatch for SHA256, want: %s, got: %s", chksums[minio.ChecksumSHA256], oi.ChecksumSHA256) + } + return nil +} + +func main() { + slog.SetDefault(slog.New(slog.NewJSONHandler( + os.Stdout, + &slog.HandlerOptions{ + Level: slog.LevelInfo, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.MessageKey || a.Value.String() == "" { + return slog.Attr{} + } + + return a + }, + }, + ))) + + tls := mustParseBool(os.Getenv(enableHTTPS)) + kms := mustParseBool(os.Getenv(enableKMS)) + if os.Getenv(enableKMS) == "" { + // Default to KMS tests. + kms = true + } + + // execute tests + if isFullMode() { + testCopyObjectWithChecksums() + testReplaceObjectWithChecksums() + testCorsSetGetDelete() + testCors() + testListMultipartUpload() + testGetObjectAttributes() + testGetObjectAttributesErrorCases() + testMakeBucketErrorV2() + testGetObjectClosedTwiceV2() + testFPutObjectV2() + testMakeBucketRegionsV2() + testGetObjectReadSeekFunctionalV2() + testGetObjectReadAtFunctionalV2() + testGetObjectRanges() + testCopyObjectV2() + testFunctionalV2() + testComposeObjectErrorCasesV2() + testCompose10KSourcesV2() + testUserMetadataCopyingV2() + testPutObjectWithChecksums() + testPutObjectWithTrailingChecksums() + testPutMultipartObjectWithChecksums() + testPutObject0ByteV2() + testPutObjectMetadataNonUSASCIIV2() + testPutObjectNoLengthV2() + testPutObjectsUnknownV2() + testGetObjectContextV2() + testFPutObjectContextV2() + testFGetObjectContextV2() + testPutObjectContextV2() + testPutObjectWithVersioning() + testMakeBucketError() + testMakeBucketRegions() + testPutObjectWithMetadata() + testPutObjectReadAt() + testPutObjectStreaming() + testPutObjectPreconditionOnNonExistent() + testGetObjectSeekEnd() + testGetObjectClosedTwice() + testGetObjectS3Zip() + testRemoveMultipleObjects() + testRemoveMultipleObjectsWithResult() + testRemoveMultipleObjectsIter() + testFPutObjectMultipart() + testFPutObject() + testGetObjectReadSeekFunctional() + testGetObjectReadAtFunctional() + testGetObjectReadAtWhenEOFWasReached() + testPresignedPostPolicy() + testPresignedPostPolicyWrongFile() + testPresignedPostPolicyEmptyFileName() + testCopyObject() + testComposeObjectErrorCases() + testCompose10KSources() + testUserMetadataCopying() + testBucketNotification() + testFunctional() + testGetObjectModified() + testPutObjectUploadSeekedObject() + testGetObjectContext() + testFPutObjectContext() + testFGetObjectContext() + testGetObjectACLContext() + testPutObjectContext() + testStorageClassMetadataPutObject() + testStorageClassInvalidMetadataPutObject() + testStorageClassMetadataCopyObject() + testPutObjectWithContentLanguage() + testListObjects() + testRemoveObjects() + testRemoveObjectsIter() + testListObjectVersions() + testStatObjectWithVersioning() + testGetObjectWithVersioning() + testCopyObjectWithVersioning() + testConcurrentCopyObjectWithVersioning() + testComposeObjectWithVersioning() + testRemoveObjectWithVersioning() + testRemoveObjectsWithVersioning() + testObjectTaggingWithVersioning() + testTrailingChecksums() + testPutObjectWithAutomaticChecksums() + testGetBucketTagging() + testSetBucketTagging() + testRemoveBucketTagging() + + // SSE-C tests will only work over TLS connection. + if tls { + testGetObjectAttributesSSECEncryption() + testSSECEncryptionPutGet() + testSSECEncryptionFPut() + testSSECEncryptedGetObjectReadAtFunctional() + testSSECEncryptedGetObjectReadSeekFunctional() + testEncryptedCopyObjectV2() + testEncryptedSSECToSSECCopyObject() + testEncryptedSSECToUnencryptedCopyObject() + testUnencryptedToSSECCopyObject() + testUnencryptedToUnencryptedCopyObject() + testEncryptedEmptyObject() + testDecryptedCopyObject() + testSSECEncryptedToSSECCopyObjectPart() + testSSECMultipartEncryptedToSSECCopyObjectPart() + testSSECEncryptedToUnencryptedCopyPart() + testUnencryptedToSSECCopyObjectPart() + testUnencryptedToUnencryptedCopyPart() + testEncryptedSSECToSSES3CopyObject() + testEncryptedSSES3ToSSECCopyObject() + testSSECEncryptedToSSES3CopyObjectPart() + testSSES3EncryptedToSSECCopyObjectPart() + } + + // KMS tests + if kms { + testSSES3EncryptionPutGet() + testSSES3EncryptionFPut() + testSSES3EncryptedGetObjectReadAtFunctional() + testSSES3EncryptedGetObjectReadSeekFunctional() + testEncryptedSSES3ToSSES3CopyObject() + testEncryptedSSES3ToUnencryptedCopyObject() + testUnencryptedToSSES3CopyObject() + testUnencryptedToSSES3CopyObjectPart() + testSSES3EncryptedToUnencryptedCopyPart() + testSSES3EncryptedToSSES3CopyObjectPart() + } + } else { + testFunctional() + testFunctionalV2() + } +} diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go new file mode 100644 index 000000000000..06dbbb74d70e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go @@ -0,0 +1,93 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "io" +) + +// hookReader hooks additional reader in the source stream. It is +// useful for making progress bars. Second reader is appropriately +// notified about the exact number of bytes read from the primary +// source on each Read operation. +type hookReader struct { + source io.Reader + hook io.Reader +} + +// Seek implements io.Seeker. Seeks source first, and if necessary +// seeks hook if Seek method is appropriately found. +func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { + // Verify for source has embedded Seeker, use it. + sourceSeeker, ok := hr.source.(io.Seeker) + if ok { + n, err = sourceSeeker.Seek(offset, whence) + if err != nil { + return 0, err + } + } + + if hr.hook != nil { + // Verify if hook has embedded Seeker, use it. + hookSeeker, ok := hr.hook.(io.Seeker) + if ok { + var m int64 + m, err = hookSeeker.Seek(offset, whence) + if err != nil { + return 0, err + } + if n != m { + return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n) + } + } + } + + return n, nil +} + +// Read implements io.Reader. Always reads from the source, the return +// value 'n' number of bytes are reported through the hook. Returns +// error for all non io.EOF conditions. +func (hr *hookReader) Read(b []byte) (n int, err error) { + n, err = hr.source.Read(b) + if err != nil && err != io.EOF { + return n, err + } + if hr.hook != nil { + // Progress the hook with the total read bytes from the source. + if _, herr := hr.hook.Read(b[:n]); herr != nil { + if herr != io.EOF { + return n, herr + } + } + } + return n, err +} + +// newHook returns a io.ReadSeeker which implements hookReader that +// reports the data read from the source to the hook. +func newHook(source, hook io.Reader) io.Reader { + if hook == nil { + return &hookReader{source: source} + } + return &hookReader{ + source: source, + hook: hook, + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go new file mode 100644 index 000000000000..e71864ee937d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go @@ -0,0 +1,91 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cors + +import ( + "encoding/xml" + "fmt" + "io" + "strings" + + "github.com/dustin/go-humanize" +) + +const defaultXMLNS = "http://s3.amazonaws.com/doc/2006-03-01/" + +// Config is the container for a CORS configuration for a bucket. +type Config struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"CORSConfiguration"` + CORSRules []Rule `xml:"CORSRule"` +} + +// Rule is a single rule in a CORS configuration. +type Rule struct { + AllowedHeader []string `xml:"AllowedHeader,omitempty"` + AllowedMethod []string `xml:"AllowedMethod,omitempty"` + AllowedOrigin []string `xml:"AllowedOrigin,omitempty"` + ExposeHeader []string `xml:"ExposeHeader,omitempty"` + ID string `xml:"ID,omitempty"` + MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty"` +} + +// NewConfig creates a new CORS configuration with the given rules. +func NewConfig(rules []Rule) *Config { + return &Config{ + XMLNS: defaultXMLNS, + XMLName: xml.Name{ + Local: "CORSConfiguration", + Space: defaultXMLNS, + }, + CORSRules: rules, + } +} + +// ParseBucketCorsConfig parses a CORS configuration in XML from an io.Reader. +func ParseBucketCorsConfig(reader io.Reader) (*Config, error) { + var c Config + + // Max size of cors document is 64KiB according to https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html + // This limiter is just for safety so has a max of 128KiB + err := xml.NewDecoder(io.LimitReader(reader, 128*humanize.KiByte)).Decode(&c) + if err != nil { + return nil, fmt.Errorf("decoding xml: %w", err) + } + if c.XMLNS == "" { + c.XMLNS = defaultXMLNS + } + for i, rule := range c.CORSRules { + for j, method := range rule.AllowedMethod { + c.CORSRules[i].AllowedMethod[j] = strings.ToUpper(method) + } + } + return &c, nil +} + +// ToXML marshals the CORS configuration to XML. +func (c Config) ToXML() ([]byte, error) { + if c.XMLNS == "" { + c.XMLNS = defaultXMLNS + } + data, err := xml.Marshal(&c) + if err != nil { + return nil, fmt.Errorf("marshaling xml: %w", err) + } + return append([]byte(xml.Header), data...), nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go new file mode 100644 index 000000000000..415b07095206 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -0,0 +1,269 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "encoding/xml" + "errors" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/signer" +) + +// AssumeRoleResponse contains the result of successful AssumeRole request. +type AssumeRoleResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"` + + Result AssumeRoleResult `xml:"AssumeRoleResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// AssumeRoleResult - Contains the response to a successful AssumeRole +// request, including temporary credentials that can be used to make +// MinIO API requests. +type AssumeRoleResult struct { + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize int `xml:",omitempty"` +} + +// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSAssumeRole struct { + Expiry + + // Optional http Client to use when connecting to MinIO STS service + // (overrides default client in CredContext) + Client *http.Client + + // STS endpoint to fetch STS credentials. + STSEndpoint string + + // various options for this request. + Options STSAssumeRoleOptions +} + +// STSAssumeRoleOptions collection of various input options +// to obtain AssumeRole credentials. +type STSAssumeRoleOptions struct { + // Mandatory inputs. + AccessKey string + SecretKey string + + SessionToken string // Optional if the first request is made with temporary credentials. + Policy string // Optional to assign a policy to the assumed role + + Location string // Optional commonly needed with AWS STS. + DurationSeconds int // Optional defaults to 1 hour. + + // Optional only valid if using with AWS STS + RoleARN string + RoleSessionName string + ExternalID string + + TokenRevokeType string // Optional, used for token revokation (MinIO only extension) +} + +// NewSTSAssumeRole returns a pointer to a new +// Credentials object wrapping the STSAssumeRole. +func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { + if opts.AccessKey == "" || opts.SecretKey == "" { + return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") + } + return New(&STSAssumeRole{ + STSEndpoint: stsEndpoint, + Options: opts, + }), nil +} + +const defaultDurationSeconds = 3600 + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. + // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) { + v := url.Values{} + v.Set("Action", "AssumeRole") + v.Set("Version", STSVersion) + if opts.RoleARN != "" { + v.Set("RoleArn", opts.RoleARN) + } + if opts.RoleSessionName != "" { + v.Set("RoleSessionName", opts.RoleSessionName) + } + if opts.DurationSeconds > defaultDurationSeconds { + v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds)) + } else { + v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds)) + } + if opts.Policy != "" { + v.Set("Policy", opts.Policy) + } + if opts.ExternalID != "" { + v.Set("ExternalId", opts.ExternalID) + } + if opts.TokenRevokeType != "" { + v.Set("TokenRevokeType", opts.TokenRevokeType) + } + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleResponse{}, err + } + u.Path = "/" + + postBody := strings.NewReader(v.Encode()) + hash := sha256.New() + if _, err = io.Copy(hash, postBody); err != nil { + return AssumeRoleResponse{}, err + } + postBody.Seek(0, 0) + + req, err := http.NewRequest(http.MethodPost, u.String(), postBody) + if err != nil { + return AssumeRoleResponse{}, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil))) + if opts.SessionToken != "" { + req.Header.Set("X-Amz-Security-Token", opts.SessionToken) + } + req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location) + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleResponse{}, err + } + defer closeResponse(resp) + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return AssumeRoleResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleResponse{}, errResp + } + + a := AssumeRoleResponse{} + if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil { + return AssumeRoleResponse{}, err + } + return a, nil +} + +// RetrieveWithCredContext retrieves credentials from the MinIO service. +// Error will be returned if the request fails, optional cred context. +func (m *STSAssumeRole) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + stsEndpoint := m.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + a, err := getAssumeRoleCredentials(client, stsEndpoint, m.Options) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, + SignerType: SignatureV4, + }, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSAssumeRole) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go new file mode 100644 index 000000000000..5ef3597d1047 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -0,0 +1,106 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +// A Chain will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The Chain provides a way of chaining multiple providers together +// which will pick the first available using priority order of the +// Providers in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the no credentials value. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again after IsExpired() is true. +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) +// +// // Usage of ChainCredentials. +// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } +type Chain struct { + Providers []Provider + curr Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return New(&Chain{ + Providers: append([]Provider{}, providers...), + }) +} + +// RetrieveWithCredContext is like Retrieve with CredContext +func (c *Chain) RetrieveWithCredContext(cc *CredContext) (Value, error) { + for _, p := range c.Providers { + creds, _ := p.RetrieveWithCredContext(cc) + // Always prioritize non-anonymous providers, if any. + if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { + continue + } + c.curr = p + return creds, nil + } + // At this point we have exhausted all the providers and + // are left without any credentials return anonymous. + return Value{ + SignerType: SignatureAnonymous, + }, nil +} + +// Retrieve returns the credentials value, returns no credentials(anonymous) +// if no credentials provider returned any value. +// +// If a provider is found with credentials, it will be cached and any calls +// to IsExpired() will return the expired state of the cached provider. +func (c *Chain) Retrieve() (Value, error) { + for _, p := range c.Providers { + creds, _ := p.Retrieve() + // Always prioritize non-anonymous providers, if any. + if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { + continue + } + c.curr = p + return creds, nil + } + // At this point we have exhausted all the providers and + // are left without any credentials return anonymous. + return Value{ + SignerType: SignatureAnonymous, + }, nil +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *Chain) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample new file mode 100644 index 000000000000..d793c9e0e97b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample @@ -0,0 +1,17 @@ +{ + "version": "8", + "hosts": { + "play": { + "url": "https://play.min.io", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v2" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "accessKey", + "secretKey": "secret", + "api": "S3v4" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go new file mode 100644 index 000000000000..52aff9a57f6f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -0,0 +1,242 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "net/http" + "sync" + "time" +) + +const ( + // STSVersion sts version string + STSVersion = "2011-06-15" + + // How much duration to slash from the given expiration duration + defaultExpiryWindow = 0.8 +) + +// defaultCredContext is used when the credential context doesn't +// actually matter or the default context is suitable. +var defaultCredContext = &CredContext{Client: http.DefaultClient} + +// A Value is the S3 credentials value for individual credential fields. +type Value struct { + // S3 Access key ID + AccessKeyID string + + // S3 Secret Access Key + SecretAccessKey string + + // S3 Session Token + SessionToken string + + // Expiration of this credentials - null means no expiration associated + Expiration time.Time + + // Signature Type. + SignerType SignatureType +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +type Provider interface { + // RetrieveWithCredContext returns nil if it successfully retrieved the + // value. Error is returned if the value were not obtainable, or empty. + // optionally takes CredContext for additional context to retrieve credentials. + RetrieveWithCredContext(cc *CredContext) (Value, error) + + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + // + // Deprecated: Retrieve() exists for historical compatibility and should not + // be used. To get new credentials use the RetrieveWithCredContext function + // to ensure the proper context (i.e. HTTP client) will be used. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// CredContext is passed to the Retrieve function of a provider to provide +// some additional context to retrieve credentials. +type CredContext struct { + // Client specifies the HTTP client that should be used if an HTTP + // request is to be made to fetch the credentials. + Client *http.Client + + // Endpoint specifies the MinIO endpoint that will be used if no + // explicit endpoint is provided. + Endpoint string +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// +// type IAMCredentialProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + cut := window + if cut < 0 { + expireIn := expiration.Sub(e.CurrentTime()) + cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow)) + } + e.expiration = expiration.Add(-cut) +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// Credentials - A container for synchronous safe retrieval of credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Deprecated: Get() exists for historical compatibility and should not be +// used. To get new credentials use the Credentials.GetWithContext function +// to ensure the proper context (i.e. HTTP client) will be used. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(nil) +} + +// GetWithContext returns the credentials value, or error if the +// credentials Value failed to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) GetWithContext(cc *CredContext) (Value, error) { + if c == nil { + return Value{}, nil + } + if cc == nil { + cc = defaultCredContext + } + + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.RetrieveWithCredContext(cc) + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json new file mode 100644 index 000000000000..afbfad559ece --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json @@ -0,0 +1,7 @@ +{ + "Version": 1, + "SessionToken": "token", + "AccessKeyId": "accessKey", + "SecretAccessKey": "secret", + "Expiration": "9999-04-27T16:02:25.000Z" +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample new file mode 100644 index 000000000000..e2dc1bfecb1d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -0,0 +1,15 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret + +[with_process] +credential_process = /bin/cat credentials.json diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go new file mode 100644 index 000000000000..fbfb105491db --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -0,0 +1,60 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// # Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +package credentials diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go new file mode 100644 index 000000000000..21ab0a38a4df --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go @@ -0,0 +1,80 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "os" + +// A EnvAWS retrieves credentials from the environment variables of the +// running process. EnvAWSironment credentials never expire. +// +// EnvAWSironment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. +// * Secret Token: AWS_SESSION_TOKEN. +type EnvAWS struct { + retrieved bool +} + +// NewEnvAWS returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvAWS() *Credentials { + return New(&EnvAWS{}) +} + +func (e *EnvAWS) retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + signerType := SignatureV4 + if id == "" || secret == "" { + signerType = SignatureAnonymous + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + SignerType: signerType, + }, nil +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvAWS) Retrieve() (Value, error) { + return e.retrieve() +} + +// RetrieveWithCredContext is like Retrieve (no-op input of Cred Context) +func (e *EnvAWS) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return e.retrieve() +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvAWS) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go new file mode 100644 index 000000000000..dbfbdfcef1d1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "os" + +// A EnvMinio retrieves credentials from the environment variables of the +// running process. EnvMinioironment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: MINIO_ACCESS_KEY. +// * Secret Access Key: MINIO_SECRET_KEY. +// * Access Key ID: MINIO_ROOT_USER. +// * Secret Access Key: MINIO_ROOT_PASSWORD. +type EnvMinio struct { + retrieved bool +} + +// NewEnvMinio returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvMinio() *Credentials { + return New(&EnvMinio{}) +} + +func (e *EnvMinio) retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("MINIO_ROOT_USER") + secret := os.Getenv("MINIO_ROOT_PASSWORD") + + signerType := SignatureV4 + if id == "" || secret == "" { + id = os.Getenv("MINIO_ACCESS_KEY") + secret = os.Getenv("MINIO_SECRET_KEY") + if id == "" || secret == "" { + signerType = SignatureAnonymous + } + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SignerType: signerType, + }, nil +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvMinio) Retrieve() (Value, error) { + return e.retrieve() +} + +// RetrieveWithCredContext is like Retrieve() (no-op input cred context) +func (e *EnvMinio) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return e.retrieve() +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvMinio) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go new file mode 100644 index 000000000000..07a9c2f0927f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go @@ -0,0 +1,95 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" +) + +// ErrorResponse - Is the typed error returned. +// ErrorResponse struct should be comparable since it is compared inside +// golang http API (https://github.com/golang/go/issues/29768) +type ErrorResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"` + STSError struct { + Type string `xml:"Type"` + Code string `xml:"Code"` + Message string `xml:"Message"` + } `xml:"Error"` + RequestID string `xml:"RequestId"` +} + +// Error - Is the typed error returned by all API operations. +type Error struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + BucketName string + Key string + Resource string + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. + Region string + + // Captures the server string returned in response header. + Server string + + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` +} + +// Error - Returns S3 error string. +func (e Error) Error() string { + if e.Message == "" { + return fmt.Sprintf("Error response code %s.", e.Code) + } + return e.Message +} + +// Error - Returns STS error string. +func (e ErrorResponse) Error() string { + if e.STSError.Message == "" { + return fmt.Sprintf("Error response code %s.", e.STSError.Code) + } + return e.STSError.Message +} + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// xmlDecodeAndBody reads the whole body up to 1MB and +// tries to XML decode it into v. +// The body that was read and any error from reading or decoding is returned. +func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { + // read the whole body (up to 1MB) + const maxBodyLength = 1 << 20 + body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + if err != nil { + return nil, err + } + return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go new file mode 100644 index 000000000000..0c83fc7fa4c7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -0,0 +1,167 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/json" + "errors" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/go-ini/ini" +) + +// A externalProcessCredentials stores the output of a credential_process +type externalProcessCredentials struct { + Version int + SessionToken string + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + Expiration time.Time +} + +// A FileAWSCredentials retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type FileAWSCredentials struct { + Expiry + + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileAWSCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewFileAWSCredentials(filename, profile string) *Credentials { + return New(&FileAWSCredentials{ + Filename: filename, + Profile: profile, + }) +} + +func (p *FileAWSCredentials) retrieve() (Value, error) { + if p.Filename == "" { + p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if p.Filename == "" { + homeDir, err := os.UserHomeDir() + if err != nil { + return Value{}, err + } + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + } + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + if p.Profile == "" { + p.Profile = "default" + } + } + + p.retrieved = false + + iniProfile, err := loadProfile(p.Filename, p.Profile) + if err != nil { + return Value{}, err + } + + // Default to empty string if not found. + id := iniProfile.Key("aws_access_key_id") + // Default to empty string if not found. + secret := iniProfile.Key("aws_secret_access_key") + // Default to empty string if not found. + token := iniProfile.Key("aws_session_token") + + // If credential_process is defined, obtain credentials by executing + // the external process + credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) + if credentialProcess != "" { + args := strings.Fields(credentialProcess) + if len(args) <= 1 { + return Value{}, errors.New("invalid credential process args") + } + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.Output() + if err != nil { + return Value{}, err + } + var externalProcessCredentials externalProcessCredentials + err = json.Unmarshal([]byte(out), &externalProcessCredentials) + if err != nil { + return Value{}, err + } + p.retrieved = true + p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: externalProcessCredentials.AccessKeyID, + SecretAccessKey: externalProcessCredentials.SecretAccessKey, + SessionToken: externalProcessCredentials.SessionToken, + Expiration: externalProcessCredentials.Expiration, + SignerType: SignatureV4, + }, nil + } + p.retrieved = true + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + SignerType: SignatureV4, + }, nil +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileAWSCredentials) Retrieve() (Value, error) { + return p.retrieve() +} + +// RetrieveWithCredContext is like Retrieve(), cred context is no-op for File credentials +func (p *FileAWSCredentials) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return p.retrieve() +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (*ini.Section, error) { + config, err := ini.Load(filename) + if err != nil { + return nil, err + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return nil, err + } + return iniProfile, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go new file mode 100644 index 000000000000..b78dcaccf8fe --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go @@ -0,0 +1,145 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/json" + "os" + "path/filepath" + "runtime" +) + +// A FileMinioClient retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Configuration file example: $HOME/.mc/config.json +type FileMinioClient struct { + // Path to the shared credentials file. + // + // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.mc/config.json" + // Windows: "%USERALIAS%\mc\config.json" + Filename string + + // MinIO Alias to extract credentials from the shared credentials file. If empty + // will default to environment variable "MINIO_ALIAS" or "s3" if + // environment variable is also not set. + Alias string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileMinioClient returns a pointer to a new Credentials object +// wrapping the Alias file provider. +func NewFileMinioClient(filename, alias string) *Credentials { + return New(&FileMinioClient{ + Filename: filename, + Alias: alias, + }) +} + +func (p *FileMinioClient) retrieve() (Value, error) { + if p.Filename == "" { + if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { + p.Filename = value + } else { + homeDir, err := os.UserHomeDir() + if err != nil { + return Value{}, err + } + p.Filename = filepath.Join(homeDir, ".mc", "config.json") + if runtime.GOOS == "windows" { + p.Filename = filepath.Join(homeDir, "mc", "config.json") + } + } + } + + if p.Alias == "" { + p.Alias = os.Getenv("MINIO_ALIAS") + if p.Alias == "" { + p.Alias = "s3" + } + } + + p.retrieved = false + + hostCfg, err := loadAlias(p.Filename, p.Alias) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return Value{ + AccessKeyID: hostCfg.AccessKey, + SecretAccessKey: hostCfg.SecretKey, + SignerType: parseSignatureType(hostCfg.API), + }, nil +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileMinioClient) Retrieve() (Value, error) { + return p.retrieve() +} + +// RetrieveWithCredContext - is like Retrieve() +func (p *FileMinioClient) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return p.retrieve() +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileMinioClient) IsExpired() bool { + return !p.retrieved +} + +// hostConfig configuration of a host. +type hostConfig struct { + URL string `json:"url"` + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + API string `json:"api"` +} + +// config config version. +type config struct { + Version string `json:"version"` + Hosts map[string]hostConfig `json:"hosts"` + Aliases map[string]hostConfig `json:"aliases"` +} + +// loadAliass loads from the file pointed to by shared credentials filename for alias. +// The credentials retrieved from the alias will be returned or error. Error will be +// returned if it fails to read from the file. +func loadAlias(filename, alias string) (hostConfig, error) { + cfg := &config{} + configBytes, err := os.ReadFile(filename) + if err != nil { + return hostConfig{}, err + } + if err = json.Unmarshal(configBytes, cfg); err != nil { + return hostConfig{}, err + } + + if cfg.Version == "10" { + return cfg.Aliases[alias], nil + } + + return cfg.Hosts[alias], nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go new file mode 100644 index 000000000000..f4f7c8f7e290 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -0,0 +1,471 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" +) + +// DefaultExpiryWindow - Default expiry window. +// ExpiryWindow will allow the credentials to trigger refreshing +// prior to the credentials actually expiring. This is beneficial +// so race conditions with expiring credentials do not cause +// request to fail unexpectedly due to ExpiredTokenException exceptions. +// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration. +// When used the tokens refresh will be triggered when 80% of the elapsed +// time until the actual expiration time is passed. +const DefaultExpiryWindow = -1 + +// A IAM retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +type IAM struct { + Expiry + + // Optional http Client to use when connecting to IAM metadata service + // (overrides default client in CredContext) + Client *http.Client + + // Custom endpoint to fetch IAM role credentials. + Endpoint string + + // Region configurable custom region for STS + Region string + + // Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html + Container struct { + AuthorizationToken string + AuthorizationTokenFile string + CredentialsFullURI string + CredentialsRelativeURI string + } + + // EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html + EKSIdentity struct { + TokenFile string + RoleARN string + RoleSessionName string + } +} + +// IAM Roles for Amazon EC2 +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +const ( + DefaultIAMRoleEndpoint = "http://169.254.169.254" + DefaultECSRoleEndpoint = "http://169.254.170.2" + DefaultSTSRoleEndpoint = "https://sts.amazonaws.com" + DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/" + TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" + TokenPath = "/latest/api/token" + TokenTTL = "21600" + TokenRequestHeader = "X-aws-ec2-metadata-token" +) + +// NewIAM returns a pointer to a new Credentials object wrapping the IAM. +func NewIAM(endpoint string) *Credentials { + return New(&IAM{ + Endpoint: endpoint, + }) +} + +// RetrieveWithCredContext is like Retrieve with Cred Context +func (m *IAM) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN") + if token == "" { + token = m.Container.AuthorizationToken + } + + tokenFile := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE") + if tokenFile == "" { + tokenFile = m.Container.AuthorizationToken + } + + relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + if relativeURI == "" { + relativeURI = m.Container.CredentialsRelativeURI + } + + fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") + if fullURI == "" { + fullURI = m.Container.CredentialsFullURI + } + + identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE") + if identityFile == "" { + identityFile = m.EKSIdentity.TokenFile + } + + roleArn := os.Getenv("AWS_ROLE_ARN") + if roleArn == "" { + roleArn = m.EKSIdentity.RoleARN + } + + roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME") + if roleSessionName == "" { + roleSessionName = m.EKSIdentity.RoleSessionName + } + + region := os.Getenv("AWS_REGION") + if region == "" { + region = m.Region + } + + var roleCreds ec2RoleCredRespBody + var err error + + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + endpoint := m.Endpoint + + switch { + case identityFile != "": + if len(endpoint) == 0 { + if region != "" { + if strings.HasPrefix(region, "cn-") { + endpoint = "https://sts." + region + ".amazonaws.com.cn" + } else { + endpoint = "https://sts." + region + ".amazonaws.com" + } + } else { + endpoint = DefaultSTSRoleEndpoint + } + } + + creds := &STSWebIdentity{ + Client: client, + STSEndpoint: endpoint, + GetWebIDTokenExpiry: func() (*WebIdentityToken, error) { + token, err := os.ReadFile(identityFile) + if err != nil { + return nil, err + } + + return &WebIdentityToken{Token: string(token)}, nil + }, + RoleARN: roleArn, + roleSessionName: roleSessionName, + } + + stsWebIdentityCreds, err := creds.RetrieveWithCredContext(cc) + if err == nil { + m.SetExpiration(creds.Expiration(), DefaultExpiryWindow) + } + return stsWebIdentityCreds, err + + case relativeURI != "": + if len(endpoint) == 0 { + endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI) + } + + roleCreds, err = getEcsTaskCredentials(client, endpoint, token) + + case tokenFile != "" && fullURI != "": + endpoint = fullURI + roleCreds, err = getEKSPodIdentityCredentials(client, endpoint, tokenFile) + + case fullURI != "": + if len(endpoint) == 0 { + endpoint = fullURI + var ok bool + if ok, err = isLoopback(endpoint); !ok { + if err == nil { + err = fmt.Errorf("uri host is not a loopback address: %s", endpoint) + } + break + } + } + + roleCreds, err = getEcsTaskCredentials(client, endpoint, token) + + default: + roleCreds, err = getCredentials(client, endpoint) + } + + if err != nil { + return Value{}, err + } + // Expiry window is set to 10secs. + m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + Expiration: roleCreds.Expiration, + SignerType: SignatureV4, + }, nil +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired +func (m *IAM) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string + + // Unused params. + LastUpdated time.Time + Type string +} + +// Get the final IAM role URL where the request will +// be sent to fetch the rolling access credentials. +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func getIAMRoleURL(endpoint string) (*url.URL, error) { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + u.Path = DefaultIAMSecurityCredsPath + return u, nil +} + +// listRoleNames lists of credential role names associated +// with the current EC2 service. If there are no credentials, +// or there is an error making or receiving the request. +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) { + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + if token != "" { + req.Header.Add(TokenRequestHeader, token) + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.New(resp.Status) + } + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, err + } + + return credsList, nil +} + +func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) { + req, err := http.NewRequest(http.MethodGet, endpoint, nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if token != "" { + req.Header.Set("Authorization", token) + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + return respCreds, nil +} + +func getEKSPodIdentityCredentials(client *http.Client, endpoint string, tokenFile string) (ec2RoleCredRespBody, error) { + if tokenFile != "" { + bytes, err := os.ReadFile(tokenFile) + if err != nil { + return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: failed to read token file:%s", err) + } + token := string(bytes) + return getEcsTaskCredentials(client, endpoint, token) + } + return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: no tokenFile found") +} + +func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil) + if err != nil { + return "", err + } + req.Header.Add(TokenRequestTTLHeader, TokenTTL) + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + data, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", errors.New(resp.Status) + } + return string(data), nil +} + +// getCredentials - obtains the credentials from the IAM role name associated with +// the current EC2 service. +// +// If the credentials cannot be found, or there is an error +// reading the response an error will be returned. +func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + if endpoint == "" { + endpoint = DefaultIAMRoleEndpoint + } + + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + token, err := fetchIMDSToken(client, endpoint) + if err != nil { + // Return only errors for valid situations, if the IMDSv2 is not enabled + // we will not be able to get the token, in such a situation we have + // to rely on IMDSv1 behavior as a fallback, this check ensures that. + // Refer https://github.com/minio/minio-go/issues/1866 + if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { + return ec2RoleCredRespBody{}, err + } + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + u, err := getIAMRoleURL(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + roleNames, err := listRoleNames(client, u, token) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if len(roleNames) == 0 { + return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // - An instance profile can contain only one IAM role. This limit cannot be increased. + roleName := roleNames[0] + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // The following command retrieves the security credentials for an + // IAM role named `s3access`. + // + // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access + // + u.Path = path.Join(u.Path, roleName) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + if token != "" { + req.Header.Add(TokenRequestHeader, token) + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, errors.New(respCreds.Message) + } + + return respCreds, nil +} + +// isLoopback identifies if a uri's host is on a loopback address +func isLoopback(uri string) (bool, error) { + u, err := url.Parse(uri) + if err != nil { + return false, err + } + + host := u.Hostname() + if len(host) == 0 { + return false, fmt.Errorf("can't parse host from uri: %s", uri) + } + + ips, err := net.LookupHost(host) + if err != nil { + return false, err + } + for _, ip := range ips { + if !net.ParseIP(ip).IsLoopback() { + return false, nil + } + } + + return true, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go new file mode 100644 index 000000000000..b79433305648 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "strings" + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is SignatureV4 or SignatureDefault. +const ( + // SignatureDefault is always set to v4. + SignatureDefault SignatureType = iota + SignatureV4 + SignatureV2 + SignatureV4Streaming + SignatureAnonymous // Anonymous signature signifies, no signature. +) + +// IsV2 - is signature SignatureV2? +func (s SignatureType) IsV2() bool { + return s == SignatureV2 +} + +// IsV4 - is signature SignatureV4? +func (s SignatureType) IsV4() bool { + return s == SignatureV4 || s == SignatureDefault +} + +// IsStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) IsStreamingV4() bool { + return s == SignatureV4Streaming +} + +// IsAnonymous - is signature empty? +func (s SignatureType) IsAnonymous() bool { + return s == SignatureAnonymous +} + +// Stringer humanized version of signature type, +// strings returned here are case insensitive. +func (s SignatureType) String() string { + if s.IsV2() { + return "S3v2" + } else if s.IsV4() { + return "S3v4" + } else if s.IsStreamingV4() { + return "S3v4Streaming" + } + return "Anonymous" +} + +func parseSignatureType(str string) SignatureType { + if strings.EqualFold(str, "S3v4") { + return SignatureV4 + } else if strings.EqualFold(str, "S3v2") { + return SignatureV2 + } else if strings.EqualFold(str, "S3v4Streaming") { + return SignatureV4Streaming + } + return SignatureAnonymous +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go new file mode 100644 index 000000000000..d90c98c84d55 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go @@ -0,0 +1,72 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +// A Static is a set of credentials which are set programmatically, +// and will never expire. +type Static struct { + Value +} + +// NewStaticV2 returns a pointer to a new Credentials object +// wrapping a static credentials value provider, signature is +// set to v2. If access and secret are not specified then +// regardless of signature type set it Value will return +// as anonymous. +func NewStaticV2(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV2) +} + +// NewStaticV4 is similar to NewStaticV2 with similar considerations. +func NewStaticV4(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV4) +} + +// NewStatic returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { + return New(&Static{ + Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + SignerType: signerType, + }, + }) +} + +// Retrieve returns the static credentials. +func (s *Static) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + // Anonymous is not an error + return Value{SignerType: SignatureAnonymous}, nil + } + return s.Value, nil +} + +// RetrieveWithCredContext returns the static credentials. +func (s *Static) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return s.Retrieve() +} + +// IsExpired returns if the credentials are expired. +// +// For Static, the credentials never expired. +func (s *Static) IsExpired() bool { + return false +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go new file mode 100644 index 000000000000..ef6f436b84b0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go @@ -0,0 +1,203 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// AssumedRoleUser - The identifiers for the temporary security credentials that +// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser +type AssumedRoleUser struct { + Arn string + AssumedRoleID string `xml:"AssumeRoleId"` +} + +// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request. +type AssumeRoleWithClientGrantsResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` + Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants +// request, including temporary credentials that can be used to make MinIO API requests. +type ClientGrantsResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromClientGrantsToken string `xml:",omitempty"` +} + +// ClientGrantsToken - client grants token with expiry. +type ClientGrantsToken struct { + Token string + Expiry int +} + +// A STSClientGrants retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSClientGrants struct { + Expiry + + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) + Client *http.Client + + // MinIO endpoint to fetch STS credentials. + STSEndpoint string + + // getClientGrantsTokenExpiry function to retrieve tokens + // from IDP This function should return two values one is + // accessToken which is a self contained access token (JWT) + // and second return value is the expiry associated with + // this token. This is a customer provided function and + // is mandatory. + GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error) +} + +// NewSTSClientGrants returns a pointer to a new +// Credentials object wrapping the STSClientGrants. +func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { + if getClientGrantsTokenExpiry == nil { + return nil, errors.New("Client grants access token and expiry retrieval function should be defined") + } + return New(&STSClientGrants{ + STSEndpoint: stsEndpoint, + GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry, + }), nil +} + +func getClientGrantsCredentials(clnt *http.Client, endpoint string, + getClientGrantsTokenExpiry func() (*ClientGrantsToken, error), +) (AssumeRoleWithClientGrantsResponse, error) { + accessToken, err := getClientGrantsTokenExpiry() + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithClientGrants") + v.Set("Token", accessToken.Token) + v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) + v.Set("Version", STSVersion) + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleWithClientGrantsResponse{}, errResp + } + + a := AssumeRoleWithClientGrantsResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + return a, nil +} + +// RetrieveWithCredContext is like Retrieve() with cred context +func (m *STSClientGrants) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + stsEndpoint := m.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + a, err := getClientGrantsCredentials(client, stsEndpoint, m.GetClientGrantsTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, + SignerType: SignatureV4, + }, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSClientGrants) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go new file mode 100644 index 000000000000..e9e7a1151f15 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go @@ -0,0 +1,179 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "time" +) + +// CustomTokenResult - Contains temporary creds and user metadata. +type CustomTokenResult struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId"` + SecretKey string `xml:"SecretAccessKey"` + Expiration time.Time `xml:"Expiration"` + SessionToken string `xml:"SessionToken"` + } `xml:",omitempty"` + + AssumedUser string `xml:",omitempty"` +} + +// AssumeRoleWithCustomTokenResponse contains the result of a successful +// AssumeRoleWithCustomToken request. +type AssumeRoleWithCustomTokenResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"` + Result CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"` + Metadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// CustomTokenIdentity - satisfies the Provider interface, and retrieves +// credentials from MinIO using the AssumeRoleWithCustomToken STS API. +type CustomTokenIdentity struct { + Expiry + + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) + Client *http.Client + + // MinIO server STS endpoint to fetch STS credentials. + STSEndpoint string + + // The custom token to use with the request. + Token string + + // RoleArn associated with the identity + RoleArn string + + // RequestedExpiry is to set the validity of the generated credentials + // (this value bounded by server). + RequestedExpiry time.Duration + + // Optional, used for token revokation + TokenRevokeType string +} + +// RetrieveWithCredContext with Retrieve optionally cred context +func (c *CustomTokenIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) { + if cc == nil { + cc = defaultCredContext + } + + stsEndpoint := c.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + u, err := url.Parse(stsEndpoint) + if err != nil { + return value, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithCustomToken") + v.Set("Version", STSVersion) + v.Set("RoleArn", c.RoleArn) + v.Set("Token", c.Token) + if c.RequestedExpiry != 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds()))) + } + if c.TokenRevokeType != "" { + v.Set("TokenRevokeType", c.TokenRevokeType) + } + + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return value, err + } + + client := c.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + resp, err := client.Do(req) + if err != nil { + return value, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return value, errors.New(resp.Status) + } + + r := AssumeRoleWithCustomTokenResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { + return value, err + } + + cr := r.Result.Credentials + c.SetExpiration(cr.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: cr.AccessKey, + SecretAccessKey: cr.SecretKey, + SessionToken: cr.SessionToken, + Expiration: cr.Expiration, + SignerType: SignatureV4, + }, nil +} + +// Retrieve - to satisfy Provider interface; fetches credentials from MinIO. +func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { + return c.RetrieveWithCredContext(nil) +} + +// NewCustomTokenCredentials - returns credentials using the +// AssumeRoleWithCustomToken STS API. +func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) { + c := CustomTokenIdentity{ + STSEndpoint: stsEndpoint, + Token: token, + RoleArn: roleArn, + } + for _, optFunc := range optFuncs { + optFunc(&c) + } + return New(&c), nil +} + +// CustomTokenOpt is a function type to configure the custom-token based +// credentials using NewCustomTokenCredentials. +type CustomTokenOpt func(*CustomTokenIdentity) + +// CustomTokenValidityOpt sets the validity duration of the requested +// credentials. This value is ignored if the server enforces a lower validity +// period. +func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt { + return func(c *CustomTokenIdentity) { + c.RequestedExpiry = d + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go new file mode 100644 index 000000000000..7e80cd6a2ac9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -0,0 +1,235 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// AssumeRoleWithLDAPResponse contains the result of successful +// AssumeRoleWithLDAPIdentity request +type AssumeRoleWithLDAPResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"` + Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// LDAPIdentityResult - contains credentials for a successful +// AssumeRoleWithLDAPIdentity request. +type LDAPIdentityResult struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + + SubjectFromToken string `xml:",omitempty"` +} + +// LDAPIdentity retrieves credentials from MinIO +type LDAPIdentity struct { + Expiry + + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) + Client *http.Client + + // Exported STS endpoint to fetch STS credentials. + STSEndpoint string + + // LDAP username/password used to fetch LDAP STS credentials. + LDAPUsername, LDAPPassword string + + // Session policy to apply to the generated credentials. Leave empty to + // use the full access policy available to the user. + Policy string + + // RequestedExpiry is the configured expiry duration for credentials + // requested from LDAP. + RequestedExpiry time.Duration + + // Optional, if empty applies to default config + ConfigName string + + // Optional, used for token revokation + TokenRevokeType string +} + +// NewLDAPIdentity returns new credentials object that uses LDAP +// Identity. +func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) { + l := LDAPIdentity{ + STSEndpoint: stsEndpoint, + LDAPUsername: ldapUsername, + LDAPPassword: ldapPassword, + } + for _, optFunc := range optFuncs { + optFunc(&l) + } + return New(&l), nil +} + +// LDAPIdentityOpt is a function type used to configured the LDAPIdentity +// instance. +type LDAPIdentityOpt func(*LDAPIdentity) + +// LDAPIdentityPolicyOpt sets the session policy for requested credentials. +func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt { + return func(k *LDAPIdentity) { + k.Policy = policy + } +} + +// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials. +func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { + return func(k *LDAPIdentity) { + k.RequestedExpiry = d + } +} + +// LDAPIdentityConfigNameOpt sets the config name for requested credentials. +func LDAPIdentityConfigNameOpt(name string) LDAPIdentityOpt { + return func(k *LDAPIdentity) { + k.ConfigName = name + } +} + +// NewLDAPIdentityWithSessionPolicy returns new credentials object that uses +// LDAP Identity with a specified session policy. The `policy` parameter must be +// a JSON string specifying the policy document. +// +// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. +func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) { + return New(&LDAPIdentity{ + STSEndpoint: stsEndpoint, + LDAPUsername: ldapUsername, + LDAPPassword: ldapPassword, + Policy: policy, + }), nil +} + +// RetrieveWithCredContext gets the credential by calling the MinIO STS API for +// LDAP on the configured stsEndpoint. +func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) { + if cc == nil { + cc = defaultCredContext + } + + stsEndpoint := k.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + u, err := url.Parse(stsEndpoint) + if err != nil { + return value, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithLDAPIdentity") + v.Set("Version", STSVersion) + v.Set("LDAPUsername", k.LDAPUsername) + v.Set("LDAPPassword", k.LDAPPassword) + if k.Policy != "" { + v.Set("Policy", k.Policy) + } + if k.RequestedExpiry != 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds()))) + } + if k.TokenRevokeType != "" { + v.Set("TokenRevokeType", k.TokenRevokeType) + } + if k.ConfigName != "" { + v.Set("ConfigName", k.ConfigName) + } + + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) + if err != nil { + return value, err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + client := k.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + resp, err := client.Do(req) + if err != nil { + return value, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return value, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return value, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return value, errResp + } + + r := AssumeRoleWithLDAPResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { + return value, err + } + + cr := r.Result.Credentials + k.SetExpiration(cr.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: cr.AccessKey, + SecretAccessKey: cr.SecretKey, + SessionToken: cr.SessionToken, + Expiration: cr.Expiration, + SignerType: SignatureV4, + }, nil +} + +// Retrieve gets the credential by calling the MinIO STS API for +// LDAP on the configured stsEndpoint. +func (k *LDAPIdentity) Retrieve() (value Value, err error) { + return k.RetrieveWithCredContext(defaultCredContext) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go new file mode 100644 index 000000000000..beab4a6a6487 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -0,0 +1,229 @@ +// MinIO Go Library for Amazon S3 Compatible Cloud Storage +// Copyright 2021 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "bytes" + "crypto/tls" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +// CertificateIdentityOption is an optional AssumeRoleWithCertificate +// parameter - e.g. a custom HTTP transport configuration or S3 credental +// livetime. +type CertificateIdentityOption func(*STSCertificateIdentity) + +// CertificateIdentityWithTransport returns a CertificateIdentityOption that +// customizes the STSCertificateIdentity with the given http.RoundTripper. +func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption { + return CertificateIdentityOption(func(i *STSCertificateIdentity) { + if i.Client == nil { + i.Client = &http.Client{} + } + i.Client.Transport = t + }) +} + +// CertificateIdentityWithExpiry returns a CertificateIdentityOption that +// customizes the STSCertificateIdentity with the given livetime. +// +// Fetched S3 credentials will have the given livetime if the STS server +// allows such credentials. +func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption { + return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime }) +} + +// A STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and +// rotates those credentials once they expire. +type STSCertificateIdentity struct { + Expiry + + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) + Client *http.Client + + // STSEndpoint is the base URL endpoint of the STS API. + // For example, https://minio.local:9000 + STSEndpoint string + + // S3CredentialLivetime is the duration temp. S3 access + // credentials should be valid. + // + // It represents the access credential livetime requested + // by the client. The STS server may choose to issue + // temp. S3 credentials that have a different - usually + // shorter - livetime. + // + // The default livetime is one hour. + S3CredentialLivetime time.Duration + + // Certificate is the client certificate that is used for + // STS authentication. + Certificate tls.Certificate + + // Optional, used for token revokation + TokenRevokeType string +} + +// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates +// to the given STS endpoint with the given TLS certificate and retrieves and +// rotates S3 credentials. +func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) { + identity := &STSCertificateIdentity{ + STSEndpoint: endpoint, + Certificate: certificate, + } + for _, option := range options { + option(identity) + } + return New(identity), nil +} + +// RetrieveWithCredContext is Retrieve with cred context +func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + stsEndpoint := i.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + endpointURL, err := url.Parse(stsEndpoint) + if err != nil { + return Value{}, err + } + livetime := i.S3CredentialLivetime + if livetime == 0 { + livetime = 1 * time.Hour + } + + queryValues := url.Values{} + queryValues.Set("Action", "AssumeRoleWithCertificate") + queryValues.Set("Version", STSVersion) + queryValues.Set("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) + if i.TokenRevokeType != "" { + queryValues.Set("TokenRevokeType", i.TokenRevokeType) + } + endpointURL.RawQuery = queryValues.Encode() + + req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil) + if err != nil { + return Value{}, err + } + + client := i.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + tr, ok := client.Transport.(*http.Transport) + if !ok { + return Value{}, fmt.Errorf("CredContext should contain an http.Transport value") + } + + // Clone the HTTP transport (patch the TLS client certificate) + trCopy := tr.Clone() + trCopy.TLSClientConfig.Certificates = []tls.Certificate{i.Certificate} + + // Clone the HTTP client (patch the HTTP transport) + clientCopy := *client + clientCopy.Transport = trCopy + + resp, err := clientCopy.Do(req) + if err != nil { + return Value{}, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return Value{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return Value{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return Value{}, errResp + } + + const MaxSize = 10 * 1 << 20 + var body io.Reader = resp.Body + if resp.ContentLength > 0 && resp.ContentLength < MaxSize { + body = io.LimitReader(body, resp.ContentLength) + } else { + body = io.LimitReader(body, MaxSize) + } + + var response assumeRoleWithCertificateResponse + if err = xml.NewDecoder(body).Decode(&response); err != nil { + return Value{}, err + } + i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: response.Result.Credentials.AccessKey, + SecretAccessKey: response.Result.Credentials.SecretKey, + SessionToken: response.Result.Credentials.SessionToken, + Expiration: response.Result.Credentials.Expiration, + SignerType: SignatureDefault, + }, nil +} + +// Retrieve fetches a new set of S3 credentials from the configured STS API endpoint. +func (i *STSCertificateIdentity) Retrieve() (Value, error) { + return i.RetrieveWithCredContext(defaultCredContext) +} + +// Expiration returns the expiration time of the current S3 credentials. +func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration } + +type assumeRoleWithCertificateResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"` + Result struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:"Credentials" json:"credentials,omitempty"` + } `xml:"AssumeRoleWithCertificateResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go new file mode 100644 index 000000000000..a9987255ec7f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -0,0 +1,271 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request. +type AssumeRoleWithWebIdentityResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` + Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity +// request, including temporary credentials that can be used to make MinIO API requests. +type WebIdentityResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromWebIdentityToken string `xml:",omitempty"` +} + +// WebIdentityToken - web identity token with expiry. +type WebIdentityToken struct { + Token string + AccessToken string + RefreshToken string + Expiry int +} + +// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSWebIdentity struct { + Expiry + + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) + Client *http.Client + + // Exported STS endpoint to fetch STS credentials. + STSEndpoint string + + // Exported GetWebIDTokenExpiry function which returns ID + // tokens from IDP. This function should return two values + // one is ID token which is a self contained ID token (JWT) + // and second return value is the expiry associated with + // this token. + // This is a customer provided function and is mandatory. + GetWebIDTokenExpiry func() (*WebIdentityToken, error) + + // RoleARN is the Amazon Resource Name (ARN) of the role that the caller is + // assuming. + RoleARN string + + // Policy is the policy where the credentials should be limited too. + Policy string + + // roleSessionName is the identifier for the assumed role session. + roleSessionName string + + // Optional, used for token revokation + TokenRevokeType string +} + +// NewSTSWebIdentity returns a pointer to a new +// Credentials object wrapping the STSWebIdentity. +func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error), opts ...func(*STSWebIdentity)) (*Credentials, error) { + if getWebIDTokenExpiry == nil { + return nil, errors.New("Web ID token and expiry retrieval function should be defined") + } + i := &STSWebIdentity{ + STSEndpoint: stsEndpoint, + GetWebIDTokenExpiry: getWebIDTokenExpiry, + } + for _, o := range opts { + o(i) + } + return New(i), nil +} + +// NewKubernetesIdentity returns a pointer to a new +// Credentials object using the Kubernetes service account +func NewKubernetesIdentity(stsEndpoint string, opts ...func(*STSWebIdentity)) (*Credentials, error) { + return NewSTSWebIdentity(stsEndpoint, func() (*WebIdentityToken, error) { + token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") + if err != nil { + return nil, err + } + + return &WebIdentityToken{ + Token: string(token), + }, nil + }, opts...) +} + +// WithPolicy option will enforce that the returned credentials +// will be scoped down to the specified policy +func WithPolicy(policy string) func(*STSWebIdentity) { + return func(i *STSWebIdentity) { + i.Policy = policy + } +} + +func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, policy string, + getWebIDTokenExpiry func() (*WebIdentityToken, error), tokenRevokeType string, +) (AssumeRoleWithWebIdentityResponse, error) { + idToken, err := getWebIDTokenExpiry() + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithWebIdentity") + if len(roleARN) > 0 { + v.Set("RoleArn", roleARN) + + if len(roleSessionName) == 0 { + roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) + } + v.Set("RoleSessionName", roleSessionName) + } + v.Set("WebIdentityToken", idToken.Token) + if idToken.AccessToken != "" { + // Usually set when server is using extended userInfo endpoint. + v.Set("WebIdentityAccessToken", idToken.AccessToken) + } + if idToken.RefreshToken != "" { + // Usually set when server is using extended userInfo endpoint. + v.Set("WebIdentityRefreshToken", idToken.RefreshToken) + } + if idToken.Expiry > 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) + } + if policy != "" { + v.Set("Policy", policy) + } + v.Set("Version", STSVersion) + if tokenRevokeType != "" { + v.Set("TokenRevokeType", tokenRevokeType) + } + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleWithWebIdentityResponse{}, errResp + } + + a := AssumeRoleWithWebIdentityResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + return a, nil +} + +// RetrieveWithCredContext is like Retrieve with optional cred context. +func (m *STSWebIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + stsEndpoint := m.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry, m.TokenRevokeType) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, + SignerType: SignatureV4, + }, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSWebIdentity) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} + +// Expiration returns the expiration time of the credentials +func (m *STSWebIdentity) Expiration() time.Time { + return m.expiration +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go new file mode 100644 index 000000000000..6db26c036f56 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go @@ -0,0 +1,24 @@ +//go:build !fips +// +build !fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = false diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go new file mode 100644 index 000000000000..640258242f90 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go @@ -0,0 +1,24 @@ +//go:build fips +// +build fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = true diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go new file mode 100644 index 000000000000..0a8a7baa20d6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -0,0 +1,197 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "net/http" + + "golang.org/x/crypto/argon2" +) + +const ( + // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. + SseGenericHeader = "X-Amz-Server-Side-Encryption" + + // SseKmsKeyID is the AWS SSE-KMS key id. + SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id" + // SseEncryptionContext is the AWS SSE-KMS Encryption Context data. + SseEncryptionContext = SseGenericHeader + "-Context" + + // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. + SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm" + // SseCustomerKey is the AWS SSE-C encryption key HTTP header key. + SseCustomerKey = SseGenericHeader + "-Customer-Key" + // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. + SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5" + + // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. + SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. + SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. + SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" +) + +// PBKDF creates a SSE-C key from the provided password and salt. +// PBKDF is a password-based key derivation function +// which can be used to derive a high-entropy cryptographic +// key from a low-entropy password and a salt. +type PBKDF func(password, salt []byte) ServerSide + +// DefaultPBKDF is the default PBKDF. It uses Argon2id with the +// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). +var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { + sse := ssec{} + copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) + return sse +} + +// Type is the server-side-encryption method. It represents one of +// the following encryption methods: +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption +type Type string + +const ( + // SSEC represents server-side-encryption with customer provided keys + SSEC Type = "SSE-C" + // KMS represents server-side-encryption with managed keys + KMS Type = "KMS" + // S3 represents server-side-encryption using S3 storage encryption + S3 Type = "S3" +) + +// ServerSide is a form of S3 server-side-encryption. +type ServerSide interface { + // Type returns the server-side-encryption method. + Type() Type + + // Marshal adds encryption headers to the provided HTTP headers. + // It marks an HTTP request as server-side-encryption request + // and inserts the required data into the headers. + Marshal(h http.Header) +} + +// NewSSE returns a server-side-encryption using S3 storage encryption. +// Using SSE-S3 the server will encrypt the object with server-managed keys. +func NewSSE() ServerSide { return s3{} } + +// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. +func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { + if context == nil { + return kms{key: keyID, hasContext: false}, nil + } + serializedContext, err := json.Marshal(context) + if err != nil { + return nil, err + } + return kms{key: keyID, context: serializedContext, hasContext: true}, nil +} + +// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. +// The key must be 32 bytes long. +func NewSSEC(key []byte) (ServerSide, error) { + if len(key) != 32 { + return nil, errors.New("encrypt: SSE-C key must be 256 bit long") + } + sse := ssec{} + copy(sse[:], key) + return sse, nil +} + +// SSE transforms a SSE-C copy encryption into a SSE-C encryption. +// It is the inverse of SSECopy(...). +// +// If the provided sse is no SSE-C copy encryption SSE returns +// sse unmodified. +func SSE(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssecCopy); ok { + return ssec(sse) + } + return sse +} + +// SSECopy transforms a SSE-C encryption into a SSE-C copy +// encryption. This is required for SSE-C key rotation or a SSE-C +// copy where the source and the destination should be encrypted. +// +// If the provided sse is no SSE-C encryption SSECopy returns +// sse unmodified. +func SSECopy(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssec); ok { + return ssecCopy(sse) + } + return sse +} + +type ssec [32]byte + +func (s ssec) Type() Type { return SSEC } + +func (s ssec) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(SseCustomerAlgorithm, "AES256") + h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type ssecCopy [32]byte + +func (s ssecCopy) Type() Type { return SSEC } + +func (s ssecCopy) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(SseCopyCustomerAlgorithm, "AES256") + h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type s3 struct{} + +func (s s3) Type() Type { return S3 } + +func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") } + +type kms struct { + key string + context []byte + hasContext bool +} + +func (s kms) Type() Type { return KMS } + +func (s kms) Marshal(h http.Header) { + h.Set(SseGenericHeader, "aws:kms") + if s.key != "" { + h.Set(SseKmsKeyID, s.key) + } + if s.hasContext { + h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/kvcache/cache.go b/vendor/github.com/minio/minio-go/v7/pkg/kvcache/cache.go new file mode 100644 index 000000000000..b37514fa37e7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/kvcache/cache.go @@ -0,0 +1,54 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kvcache + +import "sync" + +// Cache - Provides simple mechanism to hold any key value in memory +// wrapped around via sync.Map but typed with generics. +type Cache[K comparable, V any] struct { + m sync.Map +} + +// Delete delete the key +func (r *Cache[K, V]) Delete(key K) { + r.m.Delete(key) +} + +// Get - Returns a value of a given key if it exists. +func (r *Cache[K, V]) Get(key K) (value V, ok bool) { + return r.load(key) +} + +// Set - Will persist a value into cache. +func (r *Cache[K, V]) Set(key K, value V) { + r.store(key, value) +} + +func (r *Cache[K, V]) load(key K) (V, bool) { + value, ok := r.m.Load(key) + if !ok { + var zero V + return zero, false + } + return value.(V), true +} + +func (r *Cache[K, V]) store(key K, value V) { + r.m.Store(key, value) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go new file mode 100644 index 000000000000..7ed98b0d133a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -0,0 +1,542 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package lifecycle contains all the lifecycle related data types and marshallers. +package lifecycle + +import ( + "encoding/json" + "encoding/xml" + "errors" + "time" +) + +var errMissingStorageClass = errors.New("storage-class cannot be empty") + +// AbortIncompleteMultipartUpload structure, not supported yet on MinIO +type AbortIncompleteMultipartUpload struct { + XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` + DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (n AbortIncompleteMultipartUpload) IsDaysNull() bool { + return n.DaysAfterInitiation == ExpirationDays(0) +} + +// MarshalXML if days after initiation is set to non-zero value +func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.IsDaysNull() { + return nil + } + type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload + return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start) +} + +// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire. +// Upon expiration, server permanently deletes the noncurrent object versions. +// Set this lifecycle configuration action on a bucket that has versioning enabled +// (or suspended) to request server delete noncurrent object versions at a +// specific period in the object's lifetime. +type NoncurrentVersionExpiration struct { + XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"` + NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` +} + +// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions. +func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.isNull() { + return nil + } + type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration + return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start) +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionExpiration) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) +} + +func (n NoncurrentVersionExpiration) isNull() bool { + return n.IsDaysNull() && n.NewerNoncurrentVersions == 0 +} + +// NoncurrentVersionTransition structure, set this action to request server to +// transition noncurrent object versions to different set storage classes +// at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"` + NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionTransition) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) +} + +// IsStorageClassEmpty returns true if storage class field is empty +func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool { + return n.StorageClass == "" +} + +func (n NoncurrentVersionTransition) isNull() bool { + return n.StorageClass == "" +} + +// UnmarshalJSON implements NoncurrentVersionTransition JSONify +func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error { + type noncurrentVersionTransition NoncurrentVersionTransition + var nt noncurrentVersionTransition + err := json.Unmarshal(b, &nt) + if err != nil { + return err + } + + if nt.StorageClass == "" { + return errMissingStorageClass + } + *n = NoncurrentVersionTransition(nt) + return nil +} + +// MarshalXML is extended to leave out +// tags +func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.isNull() { + return nil + } + type noncurrentVersionTransitionWrapper NoncurrentVersionTransition + return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start) +} + +// Tag structure key/value pair representing an object tag to apply lifecycle configuration +type Tag struct { + XMLName xml.Name `xml:"Tag,omitempty" json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Transition structure - transition details of lifecycle configuration +type Transition struct { + XMLName xml.Name `xml:"Transition" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + Days ExpirationDays `xml:"Days" json:"Days"` +} + +// UnmarshalJSON returns an error if storage-class is empty. +func (t *Transition) UnmarshalJSON(b []byte) error { + type transition Transition + var tr transition + err := json.Unmarshal(b, &tr) + if err != nil { + return err + } + + if tr.StorageClass == "" { + return errMissingStorageClass + } + *t = Transition(tr) + return nil +} + +// MarshalJSON customizes json encoding by omitting empty values +func (t Transition) MarshalJSON() ([]byte, error) { + if t.IsNull() { + return nil, nil + } + type transition struct { + Date *ExpirationDate `json:"Date,omitempty"` + StorageClass string `json:"StorageClass,omitempty"` + Days *ExpirationDays `json:"Days"` + } + + newt := transition{ + StorageClass: t.StorageClass, + } + + if !t.IsDateNull() { + newt.Date = &t.Date + } else { + newt.Days = &t.Days + } + return json.Marshal(newt) +} + +// IsDaysNull returns true if days field is null +func (t Transition) IsDaysNull() bool { + return t.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (t Transition) IsDateNull() bool { + return t.Date.IsZero() +} + +// IsNull returns true if no storage-class is set. +func (t Transition) IsNull() bool { + return t.StorageClass == "" +} + +// MarshalXML is transition is non null +func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if t.IsNull() { + return nil + } + type transitionWrapper Transition + return en.EncodeElement(transitionWrapper(t), startElement) +} + +// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter +type And struct { + XMLName xml.Name `xml:"And" json:"-"` + Prefix string `xml:"Prefix" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag" json:"Tags,omitempty"` + ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` + ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` +} + +// IsEmpty returns true if Tags field is null +func (a And) IsEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" && + a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0 +} + +// Filter will be used in selecting rule(s) for lifecycle configuration +type Filter struct { + XMLName xml.Name `xml:"Filter" json:"-"` + And And `xml:"And,omitempty" json:"And,omitempty"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` + ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` + ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` +} + +// IsNull returns true if all Filter fields are empty. +func (f Filter) IsNull() bool { + return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" && + f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0 +} + +// MarshalJSON customizes json encoding by removing empty values. +func (f Filter) MarshalJSON() ([]byte, error) { + type filter struct { + And *And `json:"And,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Tag *Tag `json:"Tag,omitempty"` + ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"` + ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"` + } + + newf := filter{ + Prefix: f.Prefix, + } + if !f.Tag.IsEmpty() { + newf.Tag = &f.Tag + } + if !f.And.IsEmpty() { + newf.And = &f.And + } + newf.ObjectSizeLessThan = f.ObjectSizeLessThan + newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan + return json.Marshal(newf) +} + +// MarshalXML - produces the xml representation of the Filter struct +// only one of Prefix, And and Tag should be present in the output. +func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + switch { + case !f.And.IsEmpty(): + if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { + return err + } + case !f.Tag.IsEmpty(): + if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { + return err + } + default: + if f.ObjectSizeLessThan > 0 { + if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil { + return err + } + break + } + if f.ObjectSizeGreaterThan > 0 { + if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil { + return err + } + break + } + // Print empty Prefix field only when everything else is empty + if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// ExpirationDays is a type alias to unmarshal Days in Expiration +type ExpirationDays int + +// MarshalXML encodes number of days to expire if it is non-zero and +// encodes empty string otherwise +func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDays == 0 { + return nil + } + return e.EncodeElement(int(eDays), startElement) +} + +// ExpirationDate is a embedded type containing time.Time to unmarshal +// Date in Expiration +type ExpirationDate struct { + time.Time +} + +// MarshalXML encodes expiration date if it is non-zero and encodes +// empty string otherwise +func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDate.IsZero() { + return nil + } + return e.EncodeElement(eDate.Format(time.RFC3339), startElement) +} + +// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element. +type ExpireDeleteMarker ExpirationBoolean + +// IsEnabled returns true if the auto delete-marker expiration is enabled +func (e ExpireDeleteMarker) IsEnabled() bool { + return bool(e) +} + +// ExpirationBoolean represents an XML version of 'bool' type +type ExpirationBoolean bool + +// MarshalXML encodes delete marker boolean into an XML form. +func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if !b { + return nil + } + type booleanWrapper ExpirationBoolean + return e.EncodeElement(booleanWrapper(b), startElement) +} + +// IsEnabled returns true if the expiration boolean is enabled +func (b ExpirationBoolean) IsEnabled() bool { + return bool(b) +} + +// Expiration structure - expiration details of lifecycle configuration +type Expiration struct { + XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"` + DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"` +} + +// MarshalJSON customizes json encoding by removing empty day/date specification. +func (e Expiration) MarshalJSON() ([]byte, error) { + type expiration struct { + Date *ExpirationDate `json:"Date,omitempty"` + Days *ExpirationDays `json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"` + DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"` + } + + newexp := expiration{ + DeleteMarker: e.DeleteMarker, + DeleteAll: e.DeleteAll, + } + if !e.IsDaysNull() { + newexp.Days = &e.Days + } + if !e.IsDateNull() { + newexp.Date = &e.Date + } + return json.Marshal(newexp) +} + +// IsDaysNull returns true if days field is null +func (e Expiration) IsDaysNull() bool { + return e.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (e Expiration) IsDateNull() bool { + return e.Date.IsZero() +} + +// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled +func (e Expiration) IsDeleteMarkerExpirationEnabled() bool { + return e.DeleteMarker.IsEnabled() +} + +// IsNull returns true if both date and days fields are null +func (e Expiration) IsNull() bool { + return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() && !e.DeleteAll.IsEnabled() +} + +// MarshalXML is expiration is non null +func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if e.IsNull() { + return nil + } + type expirationWrapper Expiration + return en.EncodeElement(expirationWrapper(e), startElement) +} + +// DelMarkerExpiration represents DelMarkerExpiration actions element in an ILM policy +type DelMarkerExpiration struct { + XMLName xml.Name `xml:"DelMarkerExpiration" json:"-"` + Days int `xml:"Days,omitempty" json:"Days,omitempty"` +} + +// IsNull returns true if Days isn't specified and false otherwise. +func (de DelMarkerExpiration) IsNull() bool { + return de.Days == 0 +} + +// MarshalXML avoids serializing an empty DelMarkerExpiration element +func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if de.IsNull() { + return nil + } + type delMarkerExp DelMarkerExpiration + return enc.EncodeElement(delMarkerExp(de), start) +} + +// AllVersionsExpiration represents AllVersionsExpiration actions element in an ILM policy +type AllVersionsExpiration struct { + XMLName xml.Name `xml:"AllVersionsExpiration" json:"-"` + Days int `xml:"Days,omitempty" json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"DeleteMarker,omitempty" json:"DeleteMarker,omitempty"` +} + +// IsNull returns true if days field is 0 +func (e AllVersionsExpiration) IsNull() bool { + return e.Days == 0 +} + +// MarshalXML satisfies xml.Marshaler to provide custom encoding +func (e AllVersionsExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if e.IsNull() { + return nil + } + type allVersionsExp AllVersionsExpiration + return enc.EncodeElement(allVersionsExp(e), start) +} + +// MarshalJSON customizes json encoding by omitting empty values +func (r Rule) MarshalJSON() ([]byte, error) { + type rule struct { + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration *Expiration `json:"Expiration,omitempty"` + DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"` + AllVersionsExpiration *AllVersionsExpiration `json:"AllVersionsExpiration,omitempty"` + ID string `json:"ID"` + RuleFilter *Filter `json:"Filter,omitempty"` + NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Status string `json:"Status"` + Transition *Transition `json:"Transition,omitempty"` + } + newr := rule{ + Prefix: r.Prefix, + Status: r.Status, + ID: r.ID, + } + + if !r.RuleFilter.IsNull() { + newr.RuleFilter = &r.RuleFilter + } + if !r.AbortIncompleteMultipartUpload.IsDaysNull() { + newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload + } + if !r.Expiration.IsNull() { + newr.Expiration = &r.Expiration + } + if !r.DelMarkerExpiration.IsNull() { + newr.DelMarkerExpiration = &r.DelMarkerExpiration + } + if !r.Transition.IsNull() { + newr.Transition = &r.Transition + } + if !r.NoncurrentVersionExpiration.isNull() { + newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration + } + if !r.NoncurrentVersionTransition.isNull() { + newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition + } + if !r.AllVersionsExpiration.IsNull() { + newr.AllVersionsExpiration = &r.AllVersionsExpiration + } + + return json.Marshal(newr) +} + +// Rule represents a single rule in lifecycle configuration +type Rule struct { + XMLName xml.Name `xml:"Rule,omitempty" json:"-"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` + DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"` + AllVersionsExpiration AllVersionsExpiration `xml:"AllVersionsExpiration,omitempty" json:"AllVersionsExpiration,omitempty"` + ID string `xml:"ID" json:"ID"` + RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Status string `xml:"Status" json:"Status"` + Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"` +} + +// Configuration is a collection of Rule objects. +type Configuration struct { + XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"` + Rules []Rule `xml:"Rule"` +} + +// Empty check if lifecycle configuration is empty +func (c *Configuration) Empty() bool { + if c == nil { + return true + } + return len(c.Rules) == 0 +} + +// NewConfiguration initializes a fresh lifecycle configuration +// for manipulation, such as setting and removing lifecycle rules +// and filters. +func NewConfiguration() *Configuration { + return &Configuration{} +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go new file mode 100644 index 000000000000..126661a9e68e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go @@ -0,0 +1,78 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package notification + +// Indentity represents the user id, this is a compliance field. +type identity struct { + PrincipalID string `json:"principalId"` +} + +// event bucket metadata. +type bucketMeta struct { + Name string `json:"name"` + OwnerIdentity identity `json:"ownerIdentity"` + ARN string `json:"arn"` +} + +// event object metadata. +type objectMeta struct { + Key string `json:"key"` + Size int64 `json:"size,omitempty"` + ETag string `json:"eTag,omitempty"` + ContentType string `json:"contentType,omitempty"` + UserMetadata map[string]string `json:"userMetadata,omitempty"` + VersionID string `json:"versionId,omitempty"` + Sequencer string `json:"sequencer"` +} + +// event server specific metadata. +type eventMeta struct { + SchemaVersion string `json:"s3SchemaVersion"` + ConfigurationID string `json:"configurationId"` + Bucket bucketMeta `json:"bucket"` + Object objectMeta `json:"object"` +} + +// sourceInfo represents information on the client that +// triggered the event notification. +type sourceInfo struct { + Host string `json:"host"` + Port string `json:"port"` + UserAgent string `json:"userAgent"` +} + +// Event represents an Amazon an S3 bucket notification event. +type Event struct { + EventVersion string `json:"eventVersion"` + EventSource string `json:"eventSource"` + AwsRegion string `json:"awsRegion"` + EventTime string `json:"eventTime"` + EventName string `json:"eventName"` + UserIdentity identity `json:"userIdentity"` + RequestParameters map[string]string `json:"requestParameters"` + ResponseElements map[string]string `json:"responseElements"` + S3 eventMeta `json:"s3"` + Source sourceInfo `json:"source"` +} + +// Info - represents the collection of notification events, additionally +// also reports errors if any while listening on bucket notifications. +type Info struct { + Records []Event + Err error +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go new file mode 100644 index 000000000000..31f29bcb1047 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -0,0 +1,438 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package notification + +import ( + "encoding/xml" + "errors" + "fmt" + "strings" + + "github.com/minio/minio-go/v7/pkg/set" +) + +// EventType is a S3 notification event associated to the bucket notification configuration +type EventType string + +// The role of all event types are described in : +// +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +const ( + ObjectCreatedAll EventType = "s3:ObjectCreated:*" + ObjectCreatedPut EventType = "s3:ObjectCreated:Put" + ObjectCreatedPost EventType = "s3:ObjectCreated:Post" + ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy" + ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging" + ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload" + ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold" + ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention" + ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging" + ObjectAccessedGet EventType = "s3:ObjectAccessed:Get" + ObjectAccessedHead EventType = "s3:ObjectAccessed:Head" + ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention" + ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold" + ObjectAccessedAll EventType = "s3:ObjectAccessed:*" + ObjectRemovedAll EventType = "s3:ObjectRemoved:*" + ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" + ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" + ILMDelMarkerExpirationDelete EventType = "s3:LifecycleDelMarkerExpiration:Delete" + ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" + ObjectTransitionAll EventType = "s3:ObjectTransition:*" + ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed" + ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete" + ObjectTransitionPost EventType = "s3:ObjectRestore:Post" + ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed" + ObjectReplicationAll EventType = "s3:Replication:*" + ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication" + ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication" + ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold" + ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked" + ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold" + ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions" + ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix" + ObjectScannerAll EventType = "s3:Scanner:*" + BucketCreatedAll EventType = "s3:BucketCreated:*" + BucketRemovedAll EventType = "s3:BucketRemoved:*" +) + +// FilterRule - child of S3Key, a tag in the notification xml which +// carries suffix/prefix filters +type FilterRule struct { + Name string `xml:"Name"` + Value string `xml:"Value"` +} + +// S3Key - child of Filter, a tag in the notification xml which +// carries suffix/prefix filters +type S3Key struct { + FilterRules []FilterRule `xml:"FilterRule,omitempty"` +} + +// Filter - a tag in the notification xml structure which carries +// suffix/prefix filters +type Filter struct { + S3Key S3Key `xml:"S3Key,omitempty"` +} + +// Arn - holds ARN information that will be sent to the web service, +// ARN desciption can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +type Arn struct { + Partition string + Service string + Region string + AccountID string + Resource string +} + +// NewArn creates new ARN based on the given partition, service, region, account id and resource +func NewArn(partition, service, region, accountID, resource string) Arn { + return Arn{ + Partition: partition, + Service: service, + Region: region, + AccountID: accountID, + Resource: resource, + } +} + +var ( + // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' + ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") + // ErrInvalidArnFormat is returned when ARN string format is not valid + ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:::::'") +) + +// NewArnFromString parses string representation of ARN into Arn object. +// Returns an error if the string format is incorrect. +func NewArnFromString(arn string) (Arn, error) { + parts := strings.Split(arn, ":") + if len(parts) != 6 { + return Arn{}, ErrInvalidArnFormat + } + if parts[0] != "arn" { + return Arn{}, ErrInvalidArnPrefix + } + + return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil +} + +// String returns the string format of the ARN +func (arn Arn) String() string { + return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource +} + +// Config - represents one single notification configuration +// such as topic, queue or lambda configuration. +type Config struct { + ID string `xml:"Id,omitempty"` + Arn Arn `xml:"-"` + Events []EventType `xml:"Event"` + Filter *Filter `xml:"Filter,omitempty"` +} + +// NewConfig creates one notification config and sets the given ARN +func NewConfig(arn Arn) Config { + return Config{Arn: arn, Filter: &Filter{}} +} + +// AddEvents adds one event to the current notification config +func (t *Config) AddEvents(events ...EventType) { + t.Events = append(t.Events, events...) +} + +// AddFilterSuffix sets the suffix configuration to the current notification config +func (t *Config) AddFilterSuffix(suffix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "suffix", Value: suffix} + // Replace any suffix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "suffix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// AddFilterPrefix sets the prefix configuration to the current notification config +func (t *Config) AddFilterPrefix(prefix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "prefix", Value: prefix} + // Replace any prefix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "prefix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// EqualEventTypeList tells whether a and b contain the same events +func EqualEventTypeList(a, b []EventType) bool { + if len(a) != len(b) { + return false + } + setA := set.NewStringSet() + for _, i := range a { + setA.Add(string(i)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(string(i)) + } + + return setA.Difference(setB).IsEmpty() +} + +// EqualFilterRuleList tells whether a and b contain the same filters +func EqualFilterRuleList(a, b []FilterRule) bool { + if len(a) != len(b) { + return false + } + + setA := set.NewStringSet() + for _, i := range a { + setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + return setA.Difference(setB).IsEmpty() +} + +// Equal returns whether this `Config` is equal to another defined by the passed parameters +func (t *Config) Equal(events []EventType, prefix, suffix string) bool { + if t == nil { + return false + } + + // Compare events + passEvents := EqualEventTypeList(t.Events, events) + + // Compare filters + var newFilterRules []FilterRule + if prefix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix}) + } + if suffix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix}) + } + + var currentFilterRules []FilterRule + if t.Filter != nil { + currentFilterRules = t.Filter.S3Key.FilterRules + } + + passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules) + return passEvents && passFilters +} + +// TopicConfig carries one single topic notification configuration +type TopicConfig struct { + Config + Topic string `xml:"Topic"` +} + +// QueueConfig carries one single queue notification configuration +type QueueConfig struct { + Config + Queue string `xml:"Queue"` +} + +// LambdaConfig carries one single cloudfunction notification configuration +type LambdaConfig struct { + Config + Lambda string `xml:"CloudFunction"` +} + +// Configuration - the struct that represents the whole XML to be sent to the web service +type Configuration struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` + TopicConfigs []TopicConfig `xml:"TopicConfiguration"` + QueueConfigs []QueueConfig `xml:"QueueConfiguration"` +} + +// AddTopic adds a given topic config to the general bucket notification config +func (b *Configuration) AddTopic(topicConfig Config) bool { + newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()} + for _, n := range b.TopicConfigs { + // If new config matches existing one + if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range topicConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) + return true +} + +// AddQueue adds a given queue config to the general bucket notification config +func (b *Configuration) AddQueue(queueConfig Config) bool { + newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} + for _, n := range b.QueueConfigs { + if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range queueConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) + return true +} + +// AddLambda adds a given lambda config to the general bucket notification config +func (b *Configuration) AddLambda(lambdaConfig Config) bool { + newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} + for _, n := range b.LambdaConfigs { + if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range lambdaConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) + return true +} + +// RemoveTopicByArn removes all topic configurations that match the exact specified ARN +func (b *Configuration) RemoveTopicByArn(arn Arn) { + var topics []TopicConfig + for _, topic := range b.TopicConfigs { + if topic.Topic != arn.String() { + topics = append(topics, topic) + } + } + b.TopicConfigs = topics +} + +// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete +var ErrNoConfigMatch = errors.New("no notification configuration matched") + +// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.TopicConfigs { + // if it matches events and filters, mark the index for deletion + if v.Topic == arn.String() && v.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} + +// RemoveQueueByArn removes all queue configurations that match the exact specified ARN +func (b *Configuration) RemoveQueueByArn(arn Arn) { + var queues []QueueConfig + for _, queue := range b.QueueConfigs { + if queue.Queue != arn.String() { + queues = append(queues, queue) + } + } + b.QueueConfigs = queues +} + +// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.QueueConfigs { + // if it matches events and filters, mark the index for deletion + if v.Queue == arn.String() && v.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} + +// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN +func (b *Configuration) RemoveLambdaByArn(arn Arn) { + var lambdas []LambdaConfig + for _, lambda := range b.LambdaConfigs { + if lambda.Lambda != arn.String() { + lambdas = append(lambdas, lambda) + } + } + b.LambdaConfigs = lambdas +} + +// RemoveLambdaByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.LambdaConfigs { + // if it matches events and filters, mark the index for deletion + if v.Lambda == arn.String() && v.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go new file mode 100644 index 000000000000..cc17a3531f83 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -0,0 +1,1039 @@ +/* + * MinIO Client (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package replication + +import ( + "bytes" + "encoding/xml" + "fmt" + "math" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/rs/xid" +) + +var errInvalidFilter = fmt.Errorf("invalid filter") + +// OptionType specifies operation to be performed on config +type OptionType string + +const ( + // AddOption specifies addition of rule to config + AddOption OptionType = "Add" + // SetOption specifies modification of existing rule to config + SetOption OptionType = "Set" + + // RemoveOption specifies rule options are for removing a rule + RemoveOption OptionType = "Remove" + // ImportOption is for getting current config + ImportOption OptionType = "Import" +) + +// Options represents options to set a replication configuration rule +type Options struct { + Op OptionType + RoleArn string + ID string + Prefix string + RuleStatus string + Priority string + TagString string + StorageClass string + DestBucket string + IsTagSet bool + IsSCSet bool + ReplicateDeletes string // replicate versioned deletes + ReplicateDeleteMarkers string // replicate soft deletes + ReplicaSync string // replicate replica metadata modifications + ExistingObjectReplicate string +} + +// Tags returns a slice of tags for a rule +func (opts Options) Tags() ([]Tag, error) { + var tagList []Tag + tagTokens := strings.Split(opts.TagString, "&") + for _, tok := range tagTokens { + if tok == "" { + break + } + kv := strings.SplitN(tok, "=", 2) + if len(kv) != 2 { + return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs") + } + tagList = append(tagList, Tag{ + Key: kv[0], + Value: kv[1], + }) + } + return tagList, nil +} + +// Config - replication configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type Config struct { + XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` + Rules []Rule `xml:"Rule" json:"Rules"` + Role string `xml:"Role" json:"Role"` +} + +// Empty returns true if config is not set +func (c *Config) Empty() bool { + return len(c.Rules) == 0 +} + +// AddRule adds a new rule to existing replication config. If a rule exists with the +// same ID, then the rule is replaced. +func (c *Config) AddRule(opts Options) error { + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite + if opts.RoleArn != "" { + tokens := strings.Split(opts.RoleArn, ":") + if len(tokens) != 6 { + return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn) + } + switch { + case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0: + c.Role = opts.RoleArn + compatSw = true + case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"): + c.Role = opts.RoleArn + default: + return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn) + } + } + + var status Status + // toggle rule status for edit option + switch opts.RuleStatus { + case "enable": + status = Enabled + case "disable": + status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + + tags, err := opts.Tags() + if err != nil { + return err + } + andVal := And{ + Tags: tags, + } + filter := Filter{Prefix: opts.Prefix} + // only a single tag is set. + if opts.Prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || opts.Prefix != "" { + filter.And = andVal + filter.And.Prefix = opts.Prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + if opts.ID == "" { + opts.ID = xid.New().String() + } + + destBucket := opts.DestBucket + // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html + if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { + if len(btokens) == 1 && compatSw { + destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) + } else { + return fmt.Errorf("destination bucket needs to be in Arn format") + } + } + dmStatus := Disabled + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + dmStatus = Enabled + case "disable": + dmStatus = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable") + } + } + + vDeleteStatus := Disabled + if opts.ReplicateDeletes != "" { + switch opts.ReplicateDeletes { + case "enable": + vDeleteStatus = Enabled + case "disable": + vDeleteStatus = Disabled + default: + return fmt.Errorf("ReplicateDeletes should be either enable|disable") + } + } + var replicaSync Status + // replica sync is by default Enabled, unless specified. + switch opts.ReplicaSync { + case "enable", "": + replicaSync = Enabled + case "disable": + replicaSync = Disabled + default: + return fmt.Errorf("replica metadata sync should be either [enable|disable]") + } + + var existingStatus Status + if opts.ExistingObjectReplicate != "" { + switch opts.ExistingObjectReplicate { + case "enable": + existingStatus = Enabled + case "disable", "": + existingStatus = Disabled + default: + return fmt.Errorf("existingObjectReplicate should be either enable|disable") + } + } + newRule := Rule{ + ID: opts.ID, + Priority: priority, + Status: status, + Filter: filter, + Destination: Destination{ + Bucket: destBucket, + StorageClass: opts.StorageClass, + }, + DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus}, + DeleteReplication: DeleteReplication{Status: vDeleteStatus}, + // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow + // automatic failover as the expectation in this case is that replica and source should be identical. + // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html + SourceSelectionCriteria: SourceSelectionCriteria{ + ReplicaModifications: ReplicaModifications{ + Status: replicaSync, + }, + }, + // By default disable existing object replication unless selected + ExistingObjectReplication: ExistingObjectReplication{ + Status: existingStatus, + }, + } + + // validate rule after overlaying priority for pre-existing rule being disabled. + if err := newRule.Validate(); err != nil { + return err + } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + + for _, rule := range c.Rules { + if rule.Priority == newRule.Priority { + return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") + } + if rule.ID == newRule.ID { + return fmt.Errorf("a rule exists with this ID") + } + } + + c.Rules = append(c.Rules, newRule) + return nil +} + +// EditRule modifies an existing rule in replication config +func (c *Config) EditRule(opts Options) error { + if opts.ID == "" { + return fmt.Errorf("rule ID missing") + } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS. + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + + rIdx := -1 + var newRule Rule + for i, rule := range c.Rules { + if rule.ID == opts.ID { + rIdx = i + newRule = rule + break + } + } + if rIdx < 0 { + return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID) + } + prefixChg := opts.Prefix != newRule.Prefix() + if opts.IsTagSet || prefixChg { + prefix := newRule.Prefix() + if prefix != opts.Prefix { + prefix = opts.Prefix + } + tags := []Tag{newRule.Filter.Tag} + if len(newRule.Filter.And.Tags) != 0 { + tags = newRule.Filter.And.Tags + } + var err error + if opts.IsTagSet { + tags, err = opts.Tags() + if err != nil { + return err + } + } + andVal := And{ + Tags: tags, + } + + filter := Filter{Prefix: prefix} + // only a single tag is set. + if prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || prefix != "" { + filter.And = andVal + filter.And.Prefix = prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + newRule.Filter = filter + } + + // toggle rule status for edit option + if opts.RuleStatus != "" { + switch opts.RuleStatus { + case "enable": + newRule.Status = Enabled + case "disable": + newRule.Status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + } + // set DeleteMarkerReplication rule status for edit option + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + newRule.DeleteMarkerReplication.Status = Enabled + case "disable": + newRule.DeleteMarkerReplication.Status = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]") + } + } + + // set DeleteReplication rule status for edit option. This is a MinIO specific + // option to replicate versioned deletes + if opts.ReplicateDeletes != "" { + switch opts.ReplicateDeletes { + case "enable": + newRule.DeleteReplication.Status = Enabled + case "disable": + newRule.DeleteReplication.Status = Disabled + default: + return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]") + } + } + + if opts.ReplicaSync != "" { + switch opts.ReplicaSync { + case "enable", "": + newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled + case "disable": + newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled + default: + return fmt.Errorf("replica metadata sync should be either [enable|disable]") + } + } + + if opts.ExistingObjectReplicate != "" { + switch opts.ExistingObjectReplicate { + case "enable": + newRule.ExistingObjectReplication.Status = Enabled + case "disable": + newRule.ExistingObjectReplication.Status = Disabled + default: + return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]") + } + } + if opts.IsSCSet { + newRule.Destination.StorageClass = opts.StorageClass + } + if opts.Priority != "" { + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + newRule.Priority = priority + } + if opts.DestBucket != "" { + destBucket := opts.DestBucket + // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html + if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 { + return fmt.Errorf("destination bucket needs to be in Arn format") + } + newRule.Destination.Bucket = destBucket + } + // validate rule + if err := newRule.Validate(); err != nil { + return err + } + // ensure priority and destination bucket restrictions are not violated + for idx, rule := range c.Rules { + if rule.Priority == newRule.Priority && rIdx != idx { + return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") + } + if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID { + if c.Role == newRule.Destination.Bucket { + continue + } + return fmt.Errorf("invalid destination bucket for this rule") + } + } + + c.Rules[rIdx] = newRule + return nil +} + +// RemoveRule removes a rule from replication config. +func (c *Config) RemoveRule(opts Options) error { + var newRules []Rule + ruleFound := false + for _, rule := range c.Rules { + if rule.ID != opts.ID { + newRules = append(newRules, rule) + continue + } + ruleFound = true + } + if !ruleFound { + return fmt.Errorf("Rule with ID %s not found", opts.ID) + } + if len(newRules) == 0 { + return fmt.Errorf("replication configuration should have at least one rule") + } + c.Rules = newRules + return nil +} + +// Rule - a rule for replication configuration. +type Rule struct { + XMLName xml.Name `xml:"Rule" json:"-"` + ID string `xml:"ID,omitempty"` + Status Status `xml:"Status"` + Priority int `xml:"Priority"` + DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"` + DeleteReplication DeleteReplication `xml:"DeleteReplication"` + Destination Destination `xml:"Destination"` + Filter Filter `xml:"Filter" json:"Filter"` + SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"` + ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"` +} + +// Validate validates the rule for correctness +func (r Rule) Validate() error { + if err := r.validateID(); err != nil { + return err + } + if err := r.validateStatus(); err != nil { + return err + } + if err := r.validateFilter(); err != nil { + return err + } + + if r.Priority < 0 && r.Status == Enabled { + return fmt.Errorf("priority must be set for the rule") + } + + if err := r.validateStatus(); err != nil { + return err + } + return r.ExistingObjectReplication.Validate() +} + +// validateID - checks if ID is valid or not. +func (r Rule) validateID() error { + // cannot be longer than 255 characters + if len(r.ID) > 255 { + return fmt.Errorf("ID must be less than 255 characters") + } + return nil +} + +// validateStatus - checks if status is valid or not. +func (r Rule) validateStatus() error { + // Status can't be empty + if len(r.Status) == 0 { + return fmt.Errorf("status cannot be empty") + } + + // Status must be one of Enabled or Disabled + if r.Status != Enabled && r.Status != Disabled { + return fmt.Errorf("status must be set to either Enabled or Disabled") + } + return nil +} + +func (r Rule) validateFilter() error { + return r.Filter.Validate() +} + +// Prefix - a rule can either have prefix under or under +// . This method returns the prefix from the +// location where it is available +func (r Rule) Prefix() string { + if r.Filter.Prefix != "" { + return r.Filter.Prefix + } + return r.Filter.And.Prefix +} + +// Tags - a rule can either have tag under or under +// . This method returns all the tags from the +// rule in the format tag1=value1&tag2=value2 +func (r Rule) Tags() string { + ts := []Tag{r.Filter.Tag} + if len(r.Filter.And.Tags) != 0 { + ts = r.Filter.And.Tags + } + + var buf bytes.Buffer + for _, t := range ts { + if buf.Len() > 0 { + buf.WriteString("&") + } + buf.WriteString(t.String()) + } + return buf.String() +} + +// Filter - a filter for a replication configuration Rule. +type Filter struct { + XMLName xml.Name `xml:"Filter" json:"-"` + Prefix string `json:"Prefix,omitempty"` + And And `xml:"And,omitempty" json:"And,omitempty"` + Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// Validate - validates the filter element +func (f Filter) Validate() error { + // A Filter must have exactly one of Prefix, Tag, or And specified. + if !f.And.isEmpty() { + if f.Prefix != "" { + return errInvalidFilter + } + if !f.Tag.IsEmpty() { + return errInvalidFilter + } + } + if f.Prefix != "" { + if !f.Tag.IsEmpty() { + return errInvalidFilter + } + } + if !f.Tag.IsEmpty() { + if err := f.Tag.Validate(); err != nil { + return err + } + } + return nil +} + +// Tag - a tag for a replication configuration Rule filter. +type Tag struct { + XMLName xml.Name `json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +func (tag Tag) String() string { + if tag.IsEmpty() { + return "" + } + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { + return fmt.Errorf("invalid Tag Key") + } + + if utf8.RuneCountInString(tag.Value) > 256 { + return fmt.Errorf("invalid Tag Value") + } + return nil +} + +// Destination - destination in ReplicationConfiguration. +type Destination struct { + XMLName xml.Name `xml:"Destination" json:"-"` + Bucket string `xml:"Bucket" json:"Bucket"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` +} + +// And - a tag to combine a prefix and multiple tags for replication configuration rule. +type And struct { + XMLName xml.Name `xml:"And,omitempty" json:"-"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// isEmpty returns true if Tags field is null +func (a And) isEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" +} + +// Status represents Enabled/Disabled status +type Status string + +// Supported status types +const ( + Enabled Status = "Enabled" + Disabled Status = "Disabled" +) + +// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type DeleteMarkerReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteMarkerReplication is not set +func (d DeleteMarkerReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// DeleteReplication - whether versioned deletes are replicated - this +// is a MinIO specific extension +type DeleteReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteReplication is not set +func (d DeleteReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// ReplicaModifications specifies if replica modification sync is enabled +type ReplicaModifications struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default +} + +// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration. +type SourceSelectionCriteria struct { + ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"` +} + +// IsValid - checks whether SourceSelectionCriteria is valid or not. +func (s SourceSelectionCriteria) IsValid() bool { + return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled +} + +// Validate source selection criteria +func (s SourceSelectionCriteria) Validate() error { + if (s == SourceSelectionCriteria{}) { + return nil + } + if !s.IsValid() { + return fmt.Errorf("invalid ReplicaModification status") + } + return nil +} + +// ExistingObjectReplication - whether existing object replication is enabled +type ExistingObjectReplication struct { + Status Status `xml:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteMarkerReplication is not set +func (e ExistingObjectReplication) IsEmpty() bool { + return len(e.Status) == 0 +} + +// Validate validates whether the status is disabled. +func (e ExistingObjectReplication) Validate() error { + if e.IsEmpty() { + return nil + } + if e.Status != Disabled && e.Status != Enabled { + return fmt.Errorf("invalid ExistingObjectReplication status") + } + return nil +} + +// TargetMetrics represents inline replication metrics +// such as pending, failed and completed bytes in total for a bucket remote target +type TargetMetrics struct { + // Completed count + ReplicatedCount uint64 `json:"replicationCount,omitempty"` + // Completed size in bytes + ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` + // Bandwidth limit in bytes/sec for this target + BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"` + // Current bandwidth used in bytes/sec for this target + CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"` + // errors seen in replication in last minute, hour and total + Failed TimedErrStats `json:"failed,omitempty"` + // Deprecated fields + // Pending size in bytes + PendingSize uint64 `json:"pendingReplicationSize,omitempty"` + // Total Replica size in bytes + ReplicaSize uint64 `json:"replicaSize,omitempty"` + // Failed size in bytes + FailedSize uint64 `json:"failedReplicationSize,omitempty"` + // Total number of pending operations including metadata updates + PendingCount uint64 `json:"pendingReplicationCount,omitempty"` + // Total number of failed operations including metadata updates + FailedCount uint64 `json:"failedReplicationCount,omitempty"` +} + +// Metrics represents inline replication metrics for a bucket. +type Metrics struct { + Stats map[string]TargetMetrics + // Completed size in bytes across targets + ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` + // Total Replica size in bytes across targets + ReplicaSize uint64 `json:"replicaSize,omitempty"` + // Total Replica counts + ReplicaCount int64 `json:"replicaCount,omitempty"` + // Total Replicated count + ReplicatedCount int64 `json:"replicationCount,omitempty"` + // errors seen in replication in last minute, hour and total + Errors TimedErrStats `json:"failed,omitempty"` + // Total number of entries that are queued for replication + QStats InQueueMetric `json:"queued"` + // Total number of entries that have replication in progress + InProgress InProgressMetric `json:"inProgress"` + // Deprecated fields + // Total Pending size in bytes across targets + PendingSize uint64 `json:"pendingReplicationSize,omitempty"` + // Failed size in bytes across targets + FailedSize uint64 `json:"failedReplicationSize,omitempty"` + // Total number of pending operations including metadata updates across targets + PendingCount uint64 `json:"pendingReplicationCount,omitempty"` + // Total number of failed operations including metadata updates across targets + FailedCount uint64 `json:"failedReplicationCount,omitempty"` +} + +// RStat - has count and bytes for replication metrics +type RStat struct { + Count float64 `json:"count"` + Bytes int64 `json:"bytes"` +} + +// Add two RStat +func (r RStat) Add(r1 RStat) RStat { + return RStat{ + Count: r.Count + r1.Count, + Bytes: r.Bytes + r1.Bytes, + } +} + +// TimedErrStats holds error stats for a time period +type TimedErrStats struct { + LastMinute RStat `json:"lastMinute"` + LastHour RStat `json:"lastHour"` + Totals RStat `json:"totals"` +} + +// Add two TimedErrStats +func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats { + return TimedErrStats{ + LastMinute: te.LastMinute.Add(o.LastMinute), + LastHour: te.LastHour.Add(o.LastHour), + Totals: te.Totals.Add(o.Totals), + } +} + +// ResyncTargetsInfo provides replication target information to resync replicated data. +type ResyncTargetsInfo struct { + Targets []ResyncTarget `json:"target,omitempty"` +} + +// ResyncTarget provides the replica resources and resetID to initiate resync replication. +type ResyncTarget struct { + Arn string `json:"arn"` + ResetID string `json:"resetid"` + StartTime time.Time `json:"startTime,omitempty"` + EndTime time.Time `json:"endTime,omitempty"` + // Status of resync operation + ResyncStatus string `json:"resyncStatus,omitempty"` + // Completed size in bytes + ReplicatedSize int64 `json:"completedReplicationSize,omitempty"` + // Failed size in bytes + FailedSize int64 `json:"failedReplicationSize,omitempty"` + // Total number of failed operations + FailedCount int64 `json:"failedReplicationCount,omitempty"` + // Total number of completed operations + ReplicatedCount int64 `json:"replicationCount,omitempty"` + // Last bucket/object replicated. + Bucket string `json:"bucket,omitempty"` + Object string `json:"object,omitempty"` +} + +// XferStats holds transfer rate info for uploads/sec +type XferStats struct { + AvgRate float64 `json:"avgRate"` + PeakRate float64 `json:"peakRate"` + CurrRate float64 `json:"currRate"` +} + +// Merge two XferStats +func (x *XferStats) Merge(x1 XferStats) { + x.AvgRate += x1.AvgRate + x.PeakRate += x1.PeakRate + x.CurrRate += x1.CurrRate +} + +// QStat holds count and bytes for objects in replication queue +type QStat struct { + Count float64 `json:"count"` + Bytes float64 `json:"bytes"` +} + +// Add 2 QStat entries +func (q *QStat) Add(q1 QStat) { + q.Count += q1.Count + q.Bytes += q1.Bytes +} + +// InQueueMetric holds stats for objects in replication queue +type InQueueMetric struct { + Curr QStat `json:"curr" msg:"cq"` + Avg QStat `json:"avg" msg:"aq"` + Max QStat `json:"peak" msg:"pq"` +} + +// InProgressMetric holds stats for objects with replication in progress +type InProgressMetric InQueueMetric + +// MetricName name of replication metric +type MetricName string + +const ( + // Large is a metric name for large objects >=128MiB + Large MetricName = "Large" + // Small is a metric name for objects <128MiB size + Small MetricName = "Small" + // Total is a metric name for total objects + Total MetricName = "Total" +) + +// WorkerStat has stats on number of replication workers +type WorkerStat struct { + Curr int32 `json:"curr"` + Avg float32 `json:"avg"` + Max int32 `json:"max"` +} + +// TgtHealth holds health status of a target +type TgtHealth struct { + Online bool `json:"online"` + LastOnline time.Time `json:"lastOnline"` + TotalDowntime time.Duration `json:"totalDowntime"` + OfflineCount int64 `json:"offlineCount"` +} + +// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes +// and number of entries that failed replication after 3 retries +type ReplMRFStats struct { + LastFailedCount uint64 `json:"failedCount_last5min"` + // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start. + TotalDroppedCount uint64 `json:"droppedCount_since_uptime"` + // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start. + TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"` +} + +// ReplQNodeStats holds stats for a node in replication queue +type ReplQNodeStats struct { + NodeName string `json:"nodeName"` + Uptime int64 `json:"uptime"` + Workers WorkerStat `json:"workers"` + + XferStats map[MetricName]XferStats `json:"transferSummary"` + TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"` + + QStats InQueueMetric `json:"queueStats"` + InProgressStats InProgressMetric `json:"progressStats"` + + MRFStats ReplMRFStats `json:"mrfStats"` + Retries CounterSummary `json:"retries"` + Errors CounterSummary `json:"errors"` + TgtHealth map[string]TgtHealth `json:"tgtHealth,omitempty"` +} + +// CounterSummary denotes the stats counter summary +type CounterSummary struct { + // Counted last 1hr + Last1hr uint64 `json:"last1hr"` + // Counted last 1m + Last1m uint64 `json:"last1m"` + // Total counted since uptime + Total uint64 `json:"total"` +} + +// ReplQueueStats holds stats for replication queue across nodes +type ReplQueueStats struct { + Nodes []ReplQNodeStats `json:"nodes"` +} + +// Workers returns number of workers across all nodes +func (q ReplQueueStats) Workers() (tot WorkerStat) { + for _, node := range q.Nodes { + tot.Avg += node.Workers.Avg + tot.Curr += node.Workers.Curr + if tot.Max < node.Workers.Max { + tot.Max = node.Workers.Max + } + } + if len(q.Nodes) > 0 { + tot.Avg /= float32(len(q.Nodes)) + tot.Curr /= int32(len(q.Nodes)) + } + return tot +} + +// qStatSummary returns cluster level stats for objects in replication queue +func (q ReplQueueStats) qStatSummary() InQueueMetric { + m := InQueueMetric{} + for _, v := range q.Nodes { + m.Avg.Add(v.QStats.Avg) + m.Curr.Add(v.QStats.Curr) + if m.Max.Count < v.QStats.Max.Count { + m.Max.Add(v.QStats.Max) + } + } + return m +} + +// inProgressSummary returns cluster level stats for objects with replication in progress +func (q ReplQueueStats) inProgressSummary() InProgressMetric { + m := InProgressMetric{} + for _, v := range q.Nodes { + m.Avg.Add(v.InProgressStats.Avg) + m.Curr.Add(v.InProgressStats.Curr) + if m.Max.Count < v.InProgressStats.Max.Count { + m.Max.Add(v.InProgressStats.Max) + } + } + return m +} + +// ReplQStats holds stats for objects in replication queue +type ReplQStats struct { + Uptime int64 `json:"uptime"` + Workers WorkerStat `json:"workers"` + + XferStats map[MetricName]XferStats `json:"xferStats"` + TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"` + + QStats InQueueMetric `json:"qStats"` + InProgressStats InProgressMetric `json:"progressStats"` + + MRFStats ReplMRFStats `json:"mrfStats"` + Retries CounterSummary `json:"retries"` + Errors CounterSummary `json:"errors"` +} + +// QStats returns cluster level stats for objects in replication queue +func (q ReplQueueStats) QStats() (r ReplQStats) { + r.QStats = q.qStatSummary() + r.InProgressStats = q.inProgressSummary() + r.XferStats = make(map[MetricName]XferStats) + r.TgtXferStats = make(map[string]map[MetricName]XferStats) + r.Workers = q.Workers() + for _, node := range q.Nodes { + for arn := range node.TgtXferStats { + xmap, ok := node.TgtXferStats[arn] + if !ok { + xmap = make(map[MetricName]XferStats) + } + for m, v := range xmap { + st, ok := r.XferStats[m] + if !ok { + st = XferStats{} + } + st.AvgRate += v.AvgRate + st.CurrRate += v.CurrRate + st.PeakRate = math.Max(st.PeakRate, v.PeakRate) + if _, ok := r.TgtXferStats[arn]; !ok { + r.TgtXferStats[arn] = make(map[MetricName]XferStats) + } + r.TgtXferStats[arn][m] = st + } + } + for k, v := range node.XferStats { + st, ok := r.XferStats[k] + if !ok { + st = XferStats{} + } + st.AvgRate += v.AvgRate + st.CurrRate += v.CurrRate + st.PeakRate = math.Max(st.PeakRate, v.PeakRate) + r.XferStats[k] = st + } + r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount + r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount + r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes + r.Retries.Last1hr += node.Retries.Last1hr + r.Retries.Last1m += node.Retries.Last1m + r.Retries.Total += node.Retries.Total + r.Errors.Last1hr += node.Errors.Last1hr + r.Errors.Last1m += node.Errors.Last1m + r.Errors.Total += node.Errors.Total + r.Uptime += node.Uptime + } + if len(q.Nodes) > 0 { + r.Uptime /= int64(len(q.Nodes)) // average uptime + } + return r +} + +// MetricsV2 represents replication metrics for a bucket. +type MetricsV2 struct { + Uptime int64 `json:"uptime"` + CurrentStats Metrics `json:"currStats"` + QueueStats ReplQueueStats `json:"queueStats"` + DowntimeInfo map[string]DowntimeInfo `json:"downtimeInfo"` +} + +// DowntimeInfo represents the downtime info +type DowntimeInfo struct { + Duration Stat `json:"duration"` + Count Stat `json:"count"` +} + +// Stat represents the aggregates +type Stat struct { + Total int64 `json:"total"` + Avg int64 `json:"avg"` + Max int64 `json:"max"` +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go new file mode 100644 index 000000000000..7427c13de8e5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go @@ -0,0 +1,457 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3utils + +import ( + "bytes" + "encoding/hex" + "errors" + "net" + "net/url" + "regexp" + "sort" + "strings" + "unicode/utf8" +) + +// Sentinel URL is the default url value which is invalid. +var sentinelURL = url.URL{} + +// IsValidDomain validates if input string is a valid domain name. +func IsValidDomain(host string) bool { + // See RFC 1035, RFC 3696. + host = strings.TrimSpace(host) + if len(host) == 0 || len(host) > 255 { + return false + } + // host cannot start or end with "-" + if host[len(host)-1:] == "-" || host[:1] == "-" { + return false + } + // host cannot start or end with "_" + if host[len(host)-1:] == "_" || host[:1] == "_" { + return false + } + // host cannot start with a "." + if host[:1] == "." { + return false + } + // All non alphanumeric characters are invalid. + if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:> 1 { + return parts[1] + } + + parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostExpress.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostExpressControl.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Hostname()) + if len(parts) > 1 { + if strings.HasPrefix(parts[1], "xpress-") { + return "" + } + if strings.HasPrefix(parts[1], "dualstack.") || strings.HasPrefix(parts[1], "control.") || strings.HasPrefix(parts[1], "website-") { + return "" + } + return parts[1] + } + + return "" +} + +// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint. +func IsAliyunOSSEndpoint(endpointURL url.URL) bool { + return strings.HasSuffix(endpointURL.Hostname(), "aliyuncs.com") +} + +// IsAmazonExpressRegionalEndpoint Match if the endpoint is S3 Express regional endpoint. +func IsAmazonExpressRegionalEndpoint(endpointURL url.URL) bool { + return amazonS3HostExpressControl.MatchString(endpointURL.Hostname()) +} + +// IsAmazonExpressZonalEndpoint Match if the endpoint is S3 Express zonal endpoint. +func IsAmazonExpressZonalEndpoint(endpointURL url.URL) bool { + return amazonS3HostExpress.MatchString(endpointURL.Hostname()) +} + +// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. +func IsAmazonEndpoint(endpointURL url.URL) bool { + if endpointURL.Hostname() == "s3-external-1.amazonaws.com" || endpointURL.Hostname() == "s3.amazonaws.com" { + return true + } + return GetRegionFromURL(endpointURL) != "" +} + +// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint. +func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" || + IsAmazonFIPSGovCloudEndpoint(endpointURL)) +} + +// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud. +func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Hostname(), "us-gov-") +} + +// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return strings.HasPrefix(endpointURL.Hostname(), "s3-fips") && strings.HasSuffix(endpointURL.Hostname(), ".amazonaws.com") +} + +// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint +// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html. +func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return amazonS3HostPrivateLink.MatchString(endpointURL.Hostname()) +} + +// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. +func IsGoogleEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Hostname() == "storage.googleapis.com" +} + +// Expects ascii encoded strings - from output of urlEncodePath +func percentEncodeSlash(s string) string { + return strings.ReplaceAll(s, "/", "%2F") +} + +// QueryEncode - encodes query values in their URL encoded form. In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func QueryEncode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := percentEncodeSlash(EncodePath(k)) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. +func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname strings.Builder + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + default: + l := utf8.RuneLen(s) + if l < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, l) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname.WriteString("%" + strings.ToUpper(hex)) + } + } + } + return encodedPathname.String() +} + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + validBucketNameS3Express = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]--[a-z0-9]{3,7}-az[1-6]--x-s3$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. +func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be shorter than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be longer than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// IsS3ExpressBucket is S3 express bucket? +func IsS3ExpressBucket(bucketName string) bool { + return CheckValidBucketNameS3Express(bucketName) == nil +} + +// CheckValidBucketNameS3Express - checks if we have a valid input bucket name for S3 Express. +func CheckValidBucketNameS3Express(bucketName string) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty for S3 Express") + } + + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be shorter than 3 characters for S3 Express") + } + + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be longer than 63 characters for S3 Express") + } + + // Check if the bucket matches the regex + if !validBucketNameS3Express.MatchString(bucketName) { + return errors.New("Bucket name contains invalid characters") + } + + // Extract bucket name (before ----x-s3) + parts := strings.Split(bucketName, "--") + if len(parts) != 3 || parts[2] != "x-s3" { + return errors.New("Bucket name pattern is wrong 'x-s3'") + } + bucketName = parts[0] + + // Additional validation for bucket name + // 1. No consecutive periods or hyphens + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, "--") { + return errors.New("Bucket name contains invalid characters") + } + + // 2. No period-hyphen or hyphen-period + if strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { + return errors.New("Bucket name has unexpected format or contains invalid characters") + } + + // 3. No IP address format (e.g., 192.168.0.1) + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + + return nil +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be longer than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/msgp.go b/vendor/github.com/minio/minio-go/v7/pkg/set/msgp.go new file mode 100644 index 000000000000..7d3c3620bbbf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/msgp.go @@ -0,0 +1,149 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2025 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import "github.com/tinylib/msgp/msgp" + +// EncodeMsg encodes the message to the writer. +// Values are stored as a slice of strings or nil. +func (s StringSet) EncodeMsg(writer *msgp.Writer) error { + if s == nil { + return writer.WriteNil() + } + err := writer.WriteArrayHeader(uint32(len(s))) + if err != nil { + return err + } + sorted := s.ToByteSlices() + for _, k := range sorted { + err = writer.WriteStringFromBytes(k) + if err != nil { + return err + } + } + return nil +} + +// MarshalMsg encodes the message to the bytes. +// Values are stored as a slice of strings or nil. +func (s StringSet) MarshalMsg(bytes []byte) ([]byte, error) { + if s == nil { + return msgp.AppendNil(bytes), nil + } + if len(s) == 0 { + return msgp.AppendArrayHeader(bytes, 0), nil + } + bytes = msgp.AppendArrayHeader(bytes, uint32(len(s))) + sorted := s.ToByteSlices() + for _, k := range sorted { + bytes = msgp.AppendStringFromBytes(bytes, k) + } + return bytes, nil +} + +// DecodeMsg decodes the message from the reader. +func (s *StringSet) DecodeMsg(reader *msgp.Reader) error { + if reader.IsNil() { + *s = nil + return reader.Skip() + } + sz, err := reader.ReadArrayHeader() + if err != nil { + return err + } + dst := *s + if dst == nil { + dst = make(StringSet, sz) + } else { + for k := range dst { + delete(dst, k) + } + } + for i := uint32(0); i < sz; i++ { + var k string + k, err = reader.ReadString() + if err != nil { + return err + } + dst[k] = struct{}{} + } + *s = dst + return nil +} + +// UnmarshalMsg decodes the message from the bytes. +func (s *StringSet) UnmarshalMsg(bytes []byte) ([]byte, error) { + if msgp.IsNil(bytes) { + *s = nil + return bytes[msgp.NilSize:], nil + } + // Read the array header + sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes) + if err != nil { + return nil, err + } + dst := *s + if dst == nil { + dst = make(StringSet, sz) + } else { + for k := range dst { + delete(dst, k) + } + } + for i := uint32(0); i < sz; i++ { + var k string + k, bytes, err = msgp.ReadStringBytes(bytes) + if err != nil { + return nil, err + } + dst[k] = struct{}{} + } + *s = dst + return bytes, nil +} + +// Msgsize returns the maximum size of the message. +func (s StringSet) Msgsize() int { + if s == nil { + return msgp.NilSize + } + if len(s) == 0 { + return msgp.ArrayHeaderSize + } + size := msgp.ArrayHeaderSize + for key := range s { + size += msgp.StringPrefixSize + len(key) + } + return size +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +func (s StringSet) MarshalBinary() ([]byte, error) { + return s.MarshalMsg(nil) +} + +// AppendBinary appends the binary representation of itself to the end of b +func (s StringSet) AppendBinary(b []byte) ([]byte, error) { + return s.MarshalMsg(b) +} + +// UnmarshalBinary decodes the binary representation of itself from b +func (s *StringSet) UnmarshalBinary(b []byte) error { + _, err := s.UnmarshalMsg(b) + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go new file mode 100644 index 000000000000..c12651b54438 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go @@ -0,0 +1,218 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import ( + "encoding/json" + "fmt" + "sort" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +// ToSlice - returns StringSet as string slice. +func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// ToByteSlices - returns StringSet as a sorted +// slice of byte slices, using only one allocation. +func (set StringSet) ToByteSlices() [][]byte { + length := 0 + for k := range set { + length += len(k) + } + // Preallocate the slice with the total length of all strings + // to avoid multiple allocations. + dst := make([]byte, length) + + // Add keys to this... + keys := make([][]byte, 0, len(set)) + for k := range set { + n := copy(dst, k) + keys = append(keys, dst[:n]) + dst = dst[n:] + } + sort.Slice(keys, func(i, j int) bool { + return string(keys[i]) < string(keys[j]) + }) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes string in the set. It does nothing if string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns new set containing each value who passes match function. +// A 'matchFn' should accept element in a set as first argument and +// 'matchString' as second argument. The function can do any logic to +// compare both the arguments and should return true to accept element in +// a set to include in output set else the element is ignored. +func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { + nset := NewStringSet() + for k := range set { + if matchFn(k, matchString) { + nset.Add(k) + } + } + return nset +} + +// ApplyFunc - returns new set containing each value processed by 'applyFn'. +// A 'applyFn' should accept element in a set as a argument and return +// a processed string. The function can do any logic to return a processed +// string. +func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(applyFn(k)) + } + return nset +} + +// Equals - checks whether given set is equal to current set or not. +func (set StringSet) Equals(sset StringSet) bool { + // If length of set is not equal to length of given set, the + // set is not equal to given set. + if len(set) != len(sset) { + return false + } + + // As both sets are equal in length, check each elements are equal. + for k := range set { + if _, ok := sset[k]; !ok { + return false + } + } + + return true +} + +// Intersection - returns the intersection with given set as new set. +func (set StringSet) Intersection(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// Difference - returns the difference with given set as new set. +func (set StringSet) Difference(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// Union - returns the union with given set as new set. +func (set StringSet) Union(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(k) + } + + for k := range sset { + nset.Add(k) + } + + return nset +} + +// MarshalJSON - converts to JSON data. +func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +func (set *StringSet) UnmarshalJSON(data []byte) error { + sl := []interface{}{} + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(fmt.Sprintf("%v", s)) + } + } else { + var s interface{} + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(fmt.Sprintf("%v", s)) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. +func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet, len(sl)) + for _, k := range sl { + set.Add(k) + } + return set +} + +// CopyStringSet - returns copy of given set. +func CopyStringSet(set StringSet) StringSet { + nset := make(StringSet, len(set)) + for k, v := range set { + nset[k] = v + } + return nset +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 000000000000..e18002b8d53b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -0,0 +1,223 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// getUnsignedChunkLength - calculates the length of chunk metadata +func getUnsignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + crlfLen + + chunkDataSize + + crlfLen +} + +// getUSStreamLength - calculates the length of the overall stream (data + metadata) +func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getUnsignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getUnsignedChunkLength(remainingBytes) + } + streamLen += getUnsignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += crlfLen + } + + return streamLen +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + req.TransferEncoding = []string{"aws-chunked"} + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) +} + +// StreamingUSReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingUSReader struct { + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header +} + +// writeChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { + s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingUSReader) addTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// StreamingUnsignedV4 - provides chunked upload +func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { + // Set headers needed for streaming signature. + prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingUSReader{ + baseReadCloser: req.Body, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingUSReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.writeChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.writeChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addTrailer(s.trailer) + } + break + } + return 0, err + } + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingUSReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go new file mode 100644 index 000000000000..323c65a1b1cf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -0,0 +1,450 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + md5simd "github.com/minio/md5-simd" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF + trailerKVSeparator = ":" + trailerSignature = "x-amz-trailer-signature" +) + +// Request headers to be ignored while calculating seed signature for +// a request. +var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen + } + + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + emptySHA256, + chunkChecksum, + } + + return strings.Join(stringToSignParts, "\n") +} + +// buildTrailerChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { + stringToSignParts := []string{ + streamingTrailerHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + chunkChecksum, + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. + if len(req.Trailer) == 0 { + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + } else { + req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) + for k := range req.Trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + req.TransferEncoding = []string{"aws-chunked"} + } + + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildChunkStringToSign(reqTime, region, + previousSignature, chunkCheckSum) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, + previousSignature, chunkChecksum) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + +// getSeedSignature - returns the seed signature for a given request. +func (s *StreamingReader) setSeedSignature(req *http.Request) { + // Get canonical request + canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) + + signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) + + // Calculate signature. + s.seedSignature = getSignature(signingKey, stringToSign) +} + +// StreamingReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingReader struct { + accessKeyID string + secretAccessKey string + sessionToken string + region string + prevSignature string + seedSignature string + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + reqTime time.Time + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header + sh256 md5simd.Hasher +} + +// signChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { + // Compute chunk signature for next header + s.sh256.Reset() + s.sh256.Write(s.chunkBuf[:chunkLen]) + chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil)) + + signature := buildChunkSignature(chunckChecksum, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + // Write chunk header into streaming buffer + chunkHdr := buildChunkHeader(int64(chunkLen), signature) + s.buf.Write(chunkHdr) + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingReader) addSignedTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.sh256.Reset() + s.sh256.Write(s.chunkBuf) + chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) + // Compute chunk signature + signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// setStreamingAuthHeader - builds and sets authorization header value +// for streaming signature. +func (s *StreamingReader) setStreamingAuthHeader(req *http.Request, serviceType string) { + credential := GetCredential(s.accessKeyID, s.region, s.reqTime, serviceType) + authParts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), + "Signature=" + s.seedSignature, + } + + // Set authorization header. + auth := strings.Join(authParts, ",") + req.Header.Set("Authorization", auth) +} + +// StreamingSignV4Express - provides chunked upload signatureV4 support by +// implementing io.Reader. +func StreamingSignV4Express(req *http.Request, accessKeyID, secretAccessKey, sessionToken, + region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher, +) *http.Request { + // Set headers needed for streaming signature. + prepareStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + sessionToken: sessionToken, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + sh256: sh256, + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req, ServiceTypeS3Express) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// StreamingSignV4 - provides chunked upload signatureV4 support by +// implementing io.Reader. +func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, + region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher, +) *http.Request { + // Set headers needed for streaming signature. + prepareStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + sessionToken: sessionToken, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + sh256: sh256, + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req, ServiceTypeS3) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.signChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.signChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addSignedTrailer(s.trailer) + } + break + } + return 0, err + } + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingReader) Close() error { + if s.sh256 != nil { + s.sh256.Close() + s.sh256 = nil + } + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go new file mode 100644 index 000000000000..d15c99ad78de --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -0,0 +1,319 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// Encode input URL path to URL encoded path. +func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { + if virtualHost { + reqHost := getHostAddr(req) + dotPos := strings.Index(reqHost, ".") + if dotPos > -1 { + bucketName := reqHost[:dotPos] + path = "/" + bucketName + path += req.URL.Path + path = s3utils.EncodePath(path) + return path + } + } + path = s3utils.EncodePath(req.URL.Path) + return path +} + +// PreSignV2 - presign the request in following style. +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + d := time.Now().UTC() + // Find epoch expires when the request will expire. + epochExpires := d.Unix() + expires + + // Add expires header if not present. + if expiresStr := req.Header.Get("Expires"); expiresStr == "" { + req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) + } + + // Get presigned string to sign. + stringToSign := preStringToSignV2(req, virtualHost) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Calculate signature. + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + + query := req.URL.Query() + // Handle specially for Google Cloud Storage. + if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { + query.Set("GoogleAccessId", accessKeyID) + } else { + query.Set("AWSAccessKeyId", accessKeyID) + } + + // Fill in Expires for presigned query. + query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + + // Encode query and save. + req.URL.RawQuery = s3utils.QueryEncode(query) + + // Save signature finally. + req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) + + // Return. + return &req +} + +// PostPresignSignatureV2 - presigned signature for PostPolicy +// request. +func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(policyBase64)) + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + return signature +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// SignV2 sign the request before Do() (AWS Signature Version 2). +func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + d := time.Now().UTC() + + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Calculate HMAC for secretAccessKey. + stringToSign := stringToSignV2(req, virtualHost) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Prepare auth header. + authHeader := new(bytes.Buffer) + fmt.Fprintf(authHeader, "%s %s:", signV2Algorithm, accessKeyID) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + + // Set Authorization header. + req.Header.Set("Authorization", authHeader.String()) + + return &req +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func preStringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writePreSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +} + +// writePreSignV2Headers - write preSign v2 required headers. +func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Expires") + "\n") +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func stringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writeSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +} + +// writeSignV2Headers - write signV2 required headers. +func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Date") + "\n") +} + +// writeCanonicalizedHeaders - write canonicalized headers. +func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { + var protoHeaders []string + vals := make(map[string][]string) + for k, vv := range req.Header { + // All the AMZ headers should be lowercase + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-amz") { + protoHeaders = append(protoHeaders, lk) + vals[lk] = vv + } + } + sort.Strings(protoHeaders) + for _, k := range protoHeaders { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } +} + +// AWS S3 Signature V2 calculation rule is give here: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// +// This list should be kept alphabetically sorted, do not hastily edit. +var resourceList = []string{ + "acl", + "cors", + "delete", + "encryption", + "legal-hold", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "replication", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "retention", + "select", + "select-type", + "tagging", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// From the Amazon docs: +// +// CanonicalizedResource = [ "/" + Bucket ] + +// +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { + // Save request URL. + requestURL := req.URL + // Get encoded URL path. + buf.WriteString(encodeURL2Path(&req, virtualHost)) + if requestURL.RawQuery != "" { + var n int + vals, _ := url.ParseQuery(requestURL.RawQuery) + // Verify if any sub resource queries are present, if yes + // canonicallize them. + for _, resource := range resourceList { + if vv, ok := vals[resource]; ok && len(vv) > 0 { + n++ + // First element + switch n { + case 1: + buf.WriteByte('?') + // The rest + default: + buf.WriteByte('&') + } + buf.WriteString(resource) + // Request parameters + if len(vv[0]) > 0 { + buf.WriteByte('=') + buf.WriteString(vv[0]) + } + } + } + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go new file mode 100644 index 000000000000..423384b7e1ad --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -0,0 +1,395 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +// Different service types +const ( + ServiceTypeS3 = "s3" + ServiceTypeSTS = "sts" + ServiceTypeS3Express = "s3express" +) + +// Excerpts from @lsegal - +// https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +// +// * User-Agent +// This is ignored from signing because signing this causes problems with generating pre-signed +// URLs (that are executed by other agents) or when customers pass requests through proxies, which +// may modify the user-agent. +// +// * Authorization +// Is skipped for obvious reasons. +// +// * Accept-Encoding +// Some S3 servers like Hitachi Content Platform do not honor this header for signature +// calculation. +var v4IgnoredHeaders = map[string]bool{ + "Accept-Encoding": true, + "Authorization": true, + "User-Agent": true, +} + +// getSigningKey hmac seed to calculate final signature. +func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte(serviceType)) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time, serviceType string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + serviceType, + "aws4_request", + }, "/") + return scope +} + +// GetCredential generate a credential string. +func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string { + scope := getScope(location, t, serviceType) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = unsignedPayload + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for +// signature. +func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { + var headers []string + vals := make(map[string][]string) + for k, vv := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // ignored header + } + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + if !headerExists("host", headers) { + headers = append(headers, "host") + } + sort.Strings(headers) + + var buf bytes.Buffer + // Save all the headers in canonical form
: newline + // separated for each header. + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch k { + case "host": + buf.WriteString(getHostAddr(&req)) + buf.WriteByte('\n') + default: + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(signV4TrimAll(v)) + } + buf.WriteByte('\n') + } + } + return buf.String() +} + +func headerExists(key string, headers []string) bool { + for _, k := range headers { + if k == key { + return true + } + } + return false +} + +// getSignedHeaders generate all signed request headers. +// i.e lexically sorted, semicolon-separated list of lowercase +// request header names. +func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { + var headers []string + for k := range req.Header { + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { + continue // Ignored header found continue. + } + headers = append(headers, strings.ToLower(k)) + } + if !headerExists("host", headers) { + headers = append(headers, "host") + } + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getCanonicalRequest generate a canonical request of style. +// +// canonicalRequest = +// +// \n +// \n +// \n +// \n +// \n +// +func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { + req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") + canonicalRequest := strings.Join([]string{ + req.Method, + s3utils.EncodePath(req.URL.Path), + req.URL.RawQuery, + getCanonicalHeaders(req, ignoredHeaders), + getSignedHeaders(req, ignoredHeaders), + hashedPayload, + }, "\n") + return canonicalRequest +} + +// getStringToSign a string based on selected query values. +func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" + stringToSign = stringToSign + getScope(location, t, serviceType) + "\n" + stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest))) + return stringToSign +} + +// PreSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. +func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + t := time.Now().UTC() + + // Get credential string. + credential := GetCredential(accessKeyID, location, t, ServiceTypeS3) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) + + // Set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", signedHeaders) + query.Set("X-Amz-Credential", credential) + // Set session token if available. + if sessionToken != "" { + if v := req.Header.Get("x-amz-s3session-token"); v != "" { + query.Set("X-Amz-S3session-Token", sessionToken) + } else { + query.Set("X-Amz-Security-Token", sessionToken) + } + } + req.URL.RawQuery = query.Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req)) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3) + + // Gext hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) + + // Calculate signature. + signature := getSignature(signingKey, stringToSign) + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + signature + + return &req +} + +// PostPresignSignatureV4 - presigned signature for PostPolicy +// requests. +func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { + // Get signining key. + signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) + // Calculate signature. + signature := getSignature(signingkey, policyBase64) + return signature +} + +// SignV4STS - signature v4 for STS request. +func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil) +} + +// Internal function called for different service types. +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + t := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + + // Set session token if available. + if sessionToken != "" { + // S3 Express token if not set then set sessionToken + // with older x-amz-security-token header. + if v := req.Header.Get("x-amz-s3session-token"); v == "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + } + + if len(trailer) > 0 { + for k := range trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + + req.Header.Set("Content-Encoding", "aws-chunked") + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) + } + + hashedPayload := getHashedPayload(req) + if serviceType == ServiceTypeSTS { + // Content sha256 header is not sent with the request + // but it is expected to have sha256 of payload for signature + // in STS service type request. + req.Header.Del("X-Amz-Content-Sha256") + } + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType) + + // Get hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t, serviceType) + + // Get credential string. + credential := GetCredential(accessKeyID, location, t, serviceType) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) + + // Calculate signature. + signature := getSignature(signingKey, stringToSign) + + // If regular request, construct the final authorization header. + parts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + + // Set authorization header. + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + if len(trailer) > 0 { + // Use custom chunked encoding. + req.Trailer = trailer + return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, t) + } + return &req +} + +// UnsignedTrailer will do chunked encoding with a custom trailer. +func UnsignedTrailer(req http.Request, trailer http.Header) *http.Request { + if len(trailer) == 0 { + return &req + } + // Initial time. + t := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + + for k := range trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + + req.Header.Set("Content-Encoding", "aws-chunked") + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) + + // Use custom chunked encoding. + req.Trailer = trailer + return StreamingUnsignedV4(&req, "", req.ContentLength, t) +} + +// SignV4 sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil) +} + +// SignV4Express sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func SignV4Express(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3Express, nil) +} + +// SignV4TrailerExpress sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func SignV4TrailerExpress(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3Express, trailer) +} + +// SignV4Trailer sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go new file mode 100644 index 000000000000..87c99398913b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -0,0 +1,62 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "crypto/hmac" + "crypto/sha256" + "net/http" + "strings" +) + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// getHostAddr returns host header if available, otherwise returns host from URL +func getHostAddr(req *http.Request) string { + host := req.Header.Get("host") + if host != "" && req.Host != host { + return host + } + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +func signV4TrimAll(input string) string { + // Compress adjacent spaces (a space is determined by + // unicode.IsSpace() internally here) to one space and return + return strings.Join(strings.Fields(input), " ") +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/singleflight/singleflight.go b/vendor/github.com/minio/minio-go/v7/pkg/singleflight/singleflight.go new file mode 100644 index 000000000000..49260327f289 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/singleflight/singleflight.go @@ -0,0 +1,217 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +// This is forked to provide type safety and have non-string keys. +package singleflight + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func (p *panicError) Unwrap() error { + err, ok := p.value.(error) + if !ok { + return nil + } + + return err +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack, '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call[V any] struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val V + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result[V] +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group[K comparable, V any] struct { + mu sync.Mutex // protects m + m map[K]*call[V] // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result[V any] struct { + Val V + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +// +//nolint:revive +func (g *Group[K, V]) Do(key K, fn func() (V, error)) (v V, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[K]*call[V]) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call[V]) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group[K, V]) DoChan(key K, fn func() (V, error)) <-chan Result[V] { + ch := make(chan Result[V], 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[K]*call[V]) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call[V]{chans: []chan<- Result[V]{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group[K, V]) doCall(c *call[V], key K, fn func() (V, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + g.mu.Lock() + defer g.mu.Unlock() + c.wg.Done() + if g.m[key] == c { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result[V]{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group[K, V]) Forget(key K) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go new file mode 100644 index 000000000000..b5fb9565aa50 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go @@ -0,0 +1,66 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sse + +import "encoding/xml" + +// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate +// KMS, SSEAlgoritm needs to be set to "aws:kms" +// Minio currently does not support Kms. +type ApplySSEByDefault struct { + KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` + SSEAlgorithm string `xml:"SSEAlgorithm"` +} + +// Rule layer encapsulates default encryption configuration +type Rule struct { + Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"` +} + +// Configuration is the default encryption configuration structure +type Configuration struct { + XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` + Rules []Rule `xml:"Rule"` +} + +// NewConfigurationSSES3 initializes a new SSE-S3 configuration +func NewConfigurationSSES3() *Configuration { + return &Configuration{ + Rules: []Rule{ + { + Apply: ApplySSEByDefault{ + SSEAlgorithm: "AES256", + }, + }, + }, + } +} + +// NewConfigurationSSEKMS initializes a new SSE-KMS configuration +func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration { + return &Configuration{ + Rules: []Rule{ + { + Apply: ApplySSEByDefault{ + KmsMasterKeyID: kmsMasterKey, + SSEAlgorithm: "aws:kms", + }, + }, + }, + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go new file mode 100644 index 000000000000..33465c6326de --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go @@ -0,0 +1,413 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tags + +import ( + "encoding/xml" + "io" + "net/url" + "regexp" + "sort" + "strings" + "unicode/utf8" +) + +// Error contains tag specific error. +type Error interface { + error + Code() string +} + +type errTag struct { + code string + message string +} + +// Code contains error code. +func (err errTag) Code() string { + return err.code +} + +// Error contains error message. +func (err errTag) Error() string { + return err.message +} + +var ( + errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"} + errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"} + errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"} + errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"} + errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"} +) + +// Tag comes with limitation as per +// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html amd +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +const ( + maxKeyLength = 128 + maxValueLength = 256 + maxObjectTagCount = 10 + maxTagCount = 50 +) + +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +// borrowed from this article and also testing various ASCII characters following regex +// is supported by AWS S3 for both tags and values. +var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ =]+$`) + +func checkKey(key string) error { + if len(key) == 0 { + return errInvalidTagKey + } + + if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) { + return errInvalidTagKey + } + + return nil +} + +func checkValue(value string) error { + if value != "" { + if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) { + return errInvalidTagValue + } + } + + return nil +} + +// Tag denotes key and value. +type Tag struct { + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +func (tag Tag) String() string { + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if err := checkKey(tag.Key); err != nil { + return err + } + + return checkValue(tag.Value) +} + +// MarshalXML encodes to XML data. +func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := tag.Validate(); err != nil { + return err + } + + type subTag Tag // to avoid recursively calling MarshalXML() + return e.EncodeElement(subTag(tag), start) +} + +// UnmarshalXML decodes XML data to tag. +func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type subTag Tag // to avoid recursively calling UnmarshalXML() + var st subTag + if err := d.DecodeElement(&st, &start); err != nil { + return err + } + + if err := Tag(st).Validate(); err != nil { + return err + } + + *tag = Tag(st) + return nil +} + +// tagSet represents list of unique tags. +type tagSet struct { + tagMap map[string]string + isObject bool +} + +func (tags tagSet) String() string { + if len(tags.tagMap) == 0 { + return "" + } + var buf strings.Builder + keys := make([]string, 0, len(tags.tagMap)) + for k := range tags.tagMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + keyEscaped := url.QueryEscape(k) + valueEscaped := url.QueryEscape(tags.tagMap[k]) + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(keyEscaped) + buf.WriteByte('=') + buf.WriteString(valueEscaped) + } + return buf.String() +} + +func (tags *tagSet) remove(key string) { + delete(tags.tagMap, key) +} + +func (tags *tagSet) set(key, value string, failOnExist bool) error { + if failOnExist { + if _, found := tags.tagMap[key]; found { + return errDuplicateTagKey + } + } + + if err := checkKey(key); err != nil { + return err + } + + if err := checkValue(value); err != nil { + return err + } + + if tags.isObject { + if len(tags.tagMap) == maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tags.tagMap) == maxTagCount { + return errTooManyTags + } + + tags.tagMap[key] = value + return nil +} + +func (tags tagSet) count() int { + return len(tags.tagMap) +} + +func (tags tagSet) toMap() map[string]string { + m := make(map[string]string, len(tags.tagMap)) + for key, value := range tags.tagMap { + m[key] = value + } + return m +} + +// MarshalXML encodes to XML data. +func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + tagList.Tags = make([]Tag, 0, len(tags.tagMap)) + for key, value := range tags.tagMap { + tagList.Tags = append(tagList.Tags, Tag{key, value}) + } + + return e.EncodeElement(tagList, start) +} + +// UnmarshalXML decodes XML data to tag list. +func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + if err := d.DecodeElement(&tagList, &start); err != nil { + return err + } + + if tags.isObject { + if len(tagList.Tags) > maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tagList.Tags) > maxTagCount { + return errTooManyTags + } + + m := make(map[string]string, len(tagList.Tags)) + for _, tag := range tagList.Tags { + if _, found := m[tag.Key]; found { + return errDuplicateTagKey + } + + m[tag.Key] = tag.Value + } + + tags.tagMap = m + return nil +} + +type tagging struct { + XMLName xml.Name `xml:"Tagging"` + TagSet *tagSet `xml:"TagSet"` +} + +// Tags is list of tags of XML request/response as per +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody +type Tags tagging + +func (tags Tags) String() string { + return tags.TagSet.String() +} + +// Remove removes a tag by its key. +func (tags *Tags) Remove(key string) { + tags.TagSet.remove(key) +} + +// Set sets new tag. +func (tags *Tags) Set(key, value string) error { + return tags.TagSet.set(key, value, false) +} + +// Count - return number of tags accounted for +func (tags Tags) Count() int { + return tags.TagSet.count() +} + +// ToMap returns copy of tags. +func (tags Tags) ToMap() map[string]string { + return tags.TagSet.toMap() +} + +// MapToObjectTags converts an input map of key and value into +// *Tags data structure with validation. +func MapToObjectTags(tagMap map[string]string) (*Tags, error) { + return NewTags(tagMap, true) +} + +// MapToBucketTags converts an input map of key and value into +// *Tags data structure with validation. +func MapToBucketTags(tagMap map[string]string) (*Tags, error) { + return NewTags(tagMap, false) +} + +// NewTags creates Tags from tagMap, If isObject is set, it validates for object tags. +func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) { + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + for key, value := range tagMap { + if err := tagging.TagSet.set(key, value, true); err != nil { + return nil, err + } + } + + return tagging, nil +} + +func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) { + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + if err := xml.NewDecoder(reader).Decode(tagging); err != nil { + return nil, err + } + + return tagging, nil +} + +// ParseBucketXML decodes XML data of tags in reader specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax. +func ParseBucketXML(reader io.Reader) (*Tags, error) { + return unmarshalXML(reader, false) +} + +// ParseObjectXML decodes XML data of tags in reader specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax +func ParseObjectXML(reader io.Reader) (*Tags, error) { + return unmarshalXML(reader, true) +} + +// stringsCut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. +func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (tags *tagSet) parseTags(tgs string) (err error) { + for tgs != "" { + var key string + key, tgs, _ = stringsCut(tgs, "&") + if key == "" { + continue + } + key, value, _ := stringsCut(key, "=") + key, err1 := url.QueryUnescape(key) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + value, err1 = url.QueryUnescape(value) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + if err = tags.set(key, value, true); err != nil { + return err + } + } + return err +} + +// Parse decodes HTTP query formatted string into tags which is limited by isObject. +// A query formatted string is like "key1=value1&key2=value2". +func Parse(s string, isObject bool) (*Tags, error) { + tagging := &Tags{ + TagSet: &tagSet{ + tagMap: make(map[string]string), + isObject: isObject, + }, + } + + if err := tagging.TagSet.parseTags(s); err != nil { + return nil, err + } + + return tagging, nil +} + +// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2". +func ParseObjectTags(s string) (*Tags, error) { + return Parse(s, true) +} diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go new file mode 100644 index 000000000000..d2899416c55a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -0,0 +1,443 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2023 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/base64" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// expirationDateFormat date format for expiration key in json policy. +const expirationDateFormat = "2006-01-02T15:04:05.000Z" + +// policyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// +// Example: +// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } +type policyCondition struct { + matchType string + condition string + value string +} + +// PostPolicy - Provides strict static type conversion and validation +// for Amazon S3's POST policy JSON string. +type PostPolicy struct { + // Expiration date and time of the POST policy. + expiration time.Time + // Collection of different policy conditions. + conditions []policyCondition + // ContentLengthRange minimum and maximum allowable size for the + // uploaded content. + contentLengthRange struct { + min int64 + max int64 + } + + // Post form data. + formData map[string]string +} + +// NewPostPolicy - Instantiate new post policy. +func NewPostPolicy() *PostPolicy { + p := &PostPolicy{} + p.conditions = make([]policyCondition, 0) + p.formData = make(map[string]string) + return p +} + +// SetExpires - Sets expiration time for the new policy. +func (p *PostPolicy) SetExpires(t time.Time) error { + if t.IsZero() { + return errInvalidArgument("No expiry time set.") + } + p.expiration = t + return nil +} + +// SetKey - Sets an object name for the policy based upload. +func (p *PostPolicy) SetKey(key string) error { + if strings.TrimSpace(key) == "" { + return errInvalidArgument("Object name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$key", + value: key, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = key + return nil +} + +// SetKeyStartsWith - Sets an object name that an policy based upload +// can start with. +// Can use an empty value ("") to allow any key. +func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$key", + value: keyStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = keyStartsWith + return nil +} + +// SetBucket - Sets bucket at which objects will be uploaded to. +func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" { + return errInvalidArgument("Bucket name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName + return nil +} + +// SetCondition - Sets condition for credentials, date and algorithm +func (p *PostPolicy) SetCondition(matchType, condition, value string) error { + if strings.TrimSpace(value) == "" { + return errInvalidArgument("No value specified for condition") + } + + policyCond := policyCondition{ + matchType: matchType, + condition: "$" + condition, + value: value, + } + if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[condition] = value + return nil + } + return errInvalidArgument("Invalid condition in policy") +} + +// SetTagging - Sets tagging for the object for this policy based upload. +func (p *PostPolicy) SetTagging(tagging string) error { + if strings.TrimSpace(tagging) == "" { + return errInvalidArgument("No tagging specified.") + } + _, err := tags.ParseObjectXML(strings.NewReader(tagging)) + if err != nil { + return errors.New(s3ErrorResponseMap[MalformedXML]) //nolint + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$tagging", + value: tagging, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["tagging"] = tagging + return nil +} + +// SetContentType - Sets content-type of the object for this policy +// based upload. +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" { + return errInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentTypeStartsWith - Sets what content-type of the object for this policy +// based upload can start with. +// Can use an empty value ("") to allow any content-type. +func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$Content-Type", + value: contentTypeStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentTypeStartsWith + return nil +} + +// SetContentDisposition - Sets content-disposition of the object for this policy +func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { + if strings.TrimSpace(contentDisposition) == "" { + return errInvalidArgument("No content disposition specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Disposition", + value: contentDisposition, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Disposition"] = contentDisposition + return nil +} + +// SetContentEncoding - Sets content-encoding of the object for this policy +func (p *PostPolicy) SetContentEncoding(contentEncoding string) error { + if strings.TrimSpace(contentEncoding) == "" { + return errInvalidArgument("No content encoding specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Encoding", + value: contentEncoding, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Encoding"] = contentEncoding + return nil +} + +// SetContentLengthRange - Set new min and max content length +// condition for all incoming uploads. +func (p *PostPolicy) SetContentLengthRange(minLen, maxLen int64) error { + if minLen > maxLen { + return errInvalidArgument("Minimum limit is larger than maximum limit.") + } + if minLen < 0 { + return errInvalidArgument("Minimum limit cannot be negative.") + } + if maxLen <= 0 { + return errInvalidArgument("Maximum limit cannot be non-positive.") + } + p.contentLengthRange.min = minLen + p.contentLengthRange.max = maxLen + return nil +} + +// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { + if strings.TrimSpace(redirect) == "" { + return errInvalidArgument("Redirect is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_redirect", + value: redirect, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_redirect"] = redirect + return nil +} + +// SetSuccessStatusAction - Sets the status success code of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessStatusAction(status string) error { + if strings.TrimSpace(status) == "" { + return errInvalidArgument("Status is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_status", + value: status, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_status"] = status + return nil +} + +// SetUserMetadata - Set user metadata as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserMetadata(key, value string) error { + if strings.TrimSpace(key) == "" { + return errInvalidArgument("Key is empty") + } + if strings.TrimSpace(value) == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// SetUserMetadataStartsWith - Set how an user metadata should starts with. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { + if strings.TrimSpace(key) == "" { + return errInvalidArgument("Key is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "starts-with", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// SetChecksum sets the checksum of the request. +func (p *PostPolicy) SetChecksum(c Checksum) error { + if c.IsSet() { + p.formData[amzChecksumAlgo] = c.Type.String() + p.formData[c.Type.Key()] = c.Encoded() + + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", amzChecksumAlgo), + value: c.Type.String(), + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + policyCond = policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", c.Type.Key()), + value: c.Encoded(), + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + } + return nil +} + +// SetEncryption - sets encryption headers for POST API +func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) { + if sse == nil { + return + } + h := http.Header{} + sse.Marshal(h) + for k, v := range h { + p.formData[k] = v[0] + } +} + +// SetUserData - Set user data as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserData(key, value string) error { + if key == "" { + return errInvalidArgument("Key is empty") + } + if value == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// addNewPolicy - internal helper to validate adding new policies. +// Can use starts-with with an empty value ("") to allow any content within a form field. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" { + return errInvalidArgument("Policy fields are empty.") + } + if policyCond.matchType != "starts-with" && policyCond.value == "" { + return errInvalidArgument("Policy value is empty.") + } + p.conditions = append(p.conditions, policyCond) + return nil +} + +// String function for printing policy in json formatted string. +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON - Provides Marshaled JSON in bytes. +func (p PostPolicy) marshalJSON() []byte { + expirationStr := `"expiration":"` + p.expiration.UTC().Format(expirationDateFormat) + `"` + var conditionsStr string + conditions := []string{} + for _, po := range p.conditions { + conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) + } + if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { + conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", + p.contentLengthRange.min, p.contentLengthRange.max)) + } + if len(conditions) > 0 { + conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" + } + retStr := "{" + retStr = retStr + expirationStr + "," + retStr += conditionsStr + retStr += "}" + return []byte(retStr) +} + +// base64 - Produces base64 of PostPolicy's Marshaled json. +func (p PostPolicy) base64() string { + return base64.StdEncoding.EncodeToString(p.marshalJSON()) +} diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go new file mode 100644 index 000000000000..21e9fd455e58 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go @@ -0,0 +1,65 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "iter" + "math" + "time" +) + +// newRetryTimerContinous creates a timer with exponentially increasing delays forever. +func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] { + // normalize jitter to the range [0, 1.0] + if jitter < NoJitter { + jitter = NoJitter + } + if jitter > MaxJitter { + jitter = MaxJitter + } + + // computes the exponential backoff duration according to + // https://www.awsarchitectureblog.com/2015/03/backoff.html + exponentialBackoffWait := func(attempt int) time.Duration { + // 1< maxAttempt { + attempt = maxAttempt + } + // sleep = random_between(0, min(maxSleep, base * 2 ** attempt)) + sleep := baseSleep * time.Duration(1< maxSleep { + sleep = maxSleep + } + if math.Abs(jitter-NoJitter) > 1e-9 { + sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) + } + return sleep + } + + return func(yield func(int) bool) { + var nextBackoff int + for { + if !yield(nextBackoff) { + return + } + nextBackoff++ + time.Sleep(exponentialBackoffWait(nextBackoff)) + } + } +} diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go new file mode 100644 index 000000000000..59c7a163d47c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -0,0 +1,156 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "crypto/x509" + "errors" + "iter" + "math" + "net/http" + "net/url" + "time" +) + +// MaxRetry is the maximum number of retries before stopping. +var MaxRetry = 10 + +// MaxJitter will randomize over the full exponential backoff time +const MaxJitter = 1.0 + +// NoJitter disables the use of jitter for randomizing the exponential backoff time +const NoJitter = 0.0 + +// DefaultRetryUnit - default unit multiplicative per retry. +// defaults to 200 * time.Millisecond +var DefaultRetryUnit = 200 * time.Millisecond + +// DefaultRetryCap - Each retry attempt never waits no longer than +// this maximum time duration. +var DefaultRetryCap = time.Second + +// newRetryTimer creates a timer with exponentially increasing +// delays until the maximum retry attempts are reached. +func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) iter.Seq[int] { + // computes the exponential backoff duration according to + // https://www.awsarchitectureblog.com/2015/03/backoff.html + exponentialBackoffWait := func(attempt int) time.Duration { + // normalize jitter to the range [0, 1.0] + if jitter < NoJitter { + jitter = NoJitter + } + if jitter > MaxJitter { + jitter = MaxJitter + } + + // sleep = random_between(0, min(maxSleep, base * 2 ** attempt)) + sleep := baseSleep * time.Duration(1< maxSleep { + sleep = maxSleep + } + if math.Abs(jitter-NoJitter) > 1e-9 { + sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) + } + return sleep + } + + return func(yield func(int) bool) { + // if context is already canceled, skip yield + select { + case <-ctx.Done(): + return + default: + } + + for i := range maxRetry { + if !yield(i) { + return + } + + select { + case <-time.After(exponentialBackoffWait(i)): + case <-ctx.Done(): + return + } + } + } +} + +// List of AWS S3 error codes which are retryable. +var retryableS3Codes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "InternalError": {}, + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "SlowDown": {}, + "SlowDownWrite": {}, + "SlowDownRead": {}, + // Add more AWS S3 codes here. +} + +// isS3CodeRetryable - is s3 error code retryable. +func isS3CodeRetryable(s3Code string) (ok bool) { + _, ok = retryableS3Codes[s3Code] + return ok +} + +// List of HTTP status codes which are retryable. +var retryableHTTPStatusCodes = map[int]struct{}{ + http.StatusRequestTimeout: {}, + 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet + 499: {}, // client closed request, retry. A non-standard status code introduced by nginx. + http.StatusInternalServerError: {}, + http.StatusBadGateway: {}, + http.StatusServiceUnavailable: {}, + http.StatusGatewayTimeout: {}, + 520: {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected. + // Add more HTTP status codes here. +} + +// isHTTPStatusRetryable - is HTTP error code retryable. +func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { + _, ok = retryableHTTPStatusCodes[httpStatusCode] + return ok +} + +// For now, all http Do() requests are retriable except some well defined errors +func isRequestErrorRetryable(ctx context.Context, err error) bool { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // Retry if internal timeout in the HTTP call. + return ctx.Err() == nil + } + if ue, ok := err.(*url.Error); ok { + e := ue.Unwrap() + switch e.(type) { + // x509: certificate signed by unknown authority + case x509.UnknownAuthorityError: + return false + } + switch e.Error() { + case "http: server gave HTTP response to HTTPS client": + return false + } + } + return true +} diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go new file mode 100644 index 000000000000..4bcc47d80a00 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/s3-error.go @@ -0,0 +1,116 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// Constants for error keys +const ( + NoSuchBucket = "NoSuchBucket" + NoSuchKey = "NoSuchKey" + NoSuchUpload = "NoSuchUpload" + AccessDenied = "AccessDenied" + Conflict = "Conflict" + PreconditionFailed = "PreconditionFailed" + InvalidArgument = "InvalidArgument" + EntityTooLarge = "EntityTooLarge" + EntityTooSmall = "EntityTooSmall" + UnexpectedEOF = "UnexpectedEOF" + APINotSupported = "APINotSupported" + InvalidRegion = "InvalidRegion" + NoSuchBucketPolicy = "NoSuchBucketPolicy" + BadDigest = "BadDigest" + IncompleteBody = "IncompleteBody" + InternalError = "InternalError" + InvalidAccessKeyID = "InvalidAccessKeyId" + InvalidBucketName = "InvalidBucketName" + InvalidDigest = "InvalidDigest" + InvalidRange = "InvalidRange" + MalformedXML = "MalformedXML" + MissingContentLength = "MissingContentLength" + MissingContentMD5 = "MissingContentMD5" + MissingRequestBodyError = "MissingRequestBodyError" + NotImplemented = "NotImplemented" + RequestTimeTooSkewed = "RequestTimeTooSkewed" + SignatureDoesNotMatch = "SignatureDoesNotMatch" + MethodNotAllowed = "MethodNotAllowed" + InvalidPart = "InvalidPart" + InvalidPartOrder = "InvalidPartOrder" + InvalidObjectState = "InvalidObjectState" + AuthorizationHeaderMalformed = "AuthorizationHeaderMalformed" + MalformedPOSTRequest = "MalformedPOSTRequest" + BucketNotEmpty = "BucketNotEmpty" + AllAccessDisabled = "AllAccessDisabled" + MalformedPolicy = "MalformedPolicy" + MissingFields = "MissingFields" + AuthorizationQueryParametersError = "AuthorizationQueryParametersError" + MalformedDate = "MalformedDate" + BucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + InvalidDuration = "InvalidDuration" + XAmzContentSHA256Mismatch = "XAmzContentSHA256Mismatch" + XMinioInvalidObjectName = "XMinioInvalidObjectName" + NoSuchCORSConfiguration = "NoSuchCORSConfiguration" + BucketAlreadyExists = "BucketAlreadyExists" + NoSuchVersion = "NoSuchVersion" + NoSuchTagSet = "NoSuchTagSet" + Testing = "Testing" + Success = "Success" +) + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + AccessDenied: "Access Denied.", + BadDigest: "The Content-Md5 you specified did not match what we received.", + EntityTooSmall: "Your proposed upload is smaller than the minimum allowed object size.", + EntityTooLarge: "Your proposed upload exceeds the maximum allowed object size.", + IncompleteBody: "You did not provide the number of bytes specified by the Content-Length HTTP header.", + InternalError: "We encountered an internal error, please try again.", + InvalidAccessKeyID: "The access key ID you provided does not exist in our records.", + InvalidBucketName: "The specified bucket is not valid.", + InvalidDigest: "The Content-Md5 you specified is not valid.", + InvalidRange: "The requested range is not satisfiable.", + MalformedXML: "The XML you provided was not well-formed or did not validate against our published schema.", + MissingContentLength: "You must provide the Content-Length HTTP header.", + MissingContentMD5: "Missing required header for this request: Content-Md5.", + MissingRequestBodyError: "Request body is empty.", + NoSuchBucket: "The specified bucket does not exist.", + NoSuchBucketPolicy: "The bucket policy does not exist.", + NoSuchKey: "The specified key does not exist.", + NoSuchUpload: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + NotImplemented: "A header you provided implies functionality that is not implemented.", + PreconditionFailed: "At least one of the pre-conditions you specified did not hold.", + RequestTimeTooSkewed: "The difference between the request time and the server's time is too large.", + SignatureDoesNotMatch: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + MethodNotAllowed: "The specified method is not allowed against this resource.", + InvalidPart: "One or more of the specified parts could not be found.", + InvalidPartOrder: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + InvalidObjectState: "The operation is not valid for the current state of the object.", + AuthorizationHeaderMalformed: "The authorization header is malformed; the region is wrong.", + MalformedPOSTRequest: "The body of your POST request is not well-formed multipart/form-data.", + BucketNotEmpty: "The bucket you tried to delete is not empty.", + AllAccessDisabled: "All access to this bucket has been disabled.", + MalformedPolicy: "Policy has invalid resource.", + MissingFields: "Missing fields in request.", + AuthorizationQueryParametersError: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + MalformedDate: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + BucketAlreadyOwnedByYou: "Your previous request to create the named bucket succeeded and you already own it.", + InvalidDuration: "Duration provided in the request is invalid.", + XAmzContentSHA256Mismatch: "The provided 'x-amz-content-sha256' header does not match what was computed.", + NoSuchCORSConfiguration: "The specified bucket does not have a CORS configuration.", + Conflict: "Bucket not empty.", + // Add new API errors here. +} diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go new file mode 100644 index 000000000000..1bff6646284a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/transport.go @@ -0,0 +1,83 @@ +//go:build go1.7 || go1.8 +// +build go1.7 go1.8 + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/tls" + "crypto/x509" + "net" + "net/http" + "os" + "time" +) + +// mustGetSystemCertPool - return system CAs or empty pool in case of error (or windows) +func mustGetSystemCertPool() *x509.CertPool { + pool, err := x509.SystemCertPool() + if err != nil { + return x509.NewCertPool() + } + return pool +} + +// DefaultTransport - this default transport is similar to +// http.DefaultTransport but with additional param DisableCompression +// is set to true to avoid decompressing content with 'gzip' encoding. +var DefaultTransport = func(secure bool) (*http.Transport, error) { + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 256, + MaxIdleConnsPerHost: 16, + ResponseHeaderTimeout: time.Minute, + IdleConnTimeout: time.Minute, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 10 * time.Second, + // Set this value so that the underlying transport round-tripper + // doesn't try to auto decode the body of objects with + // content-encoding set to `gzip`. + // + // Refer: + // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 + DisableCompression: true, + } + + if secure { + tr.TLSClientConfig = &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + } + if f := os.Getenv("SSL_CERT_FILE"); f != "" { + rootCAs := mustGetSystemCertPool() + data, err := os.ReadFile(f) + if err == nil { + rootCAs.AppendCertsFromPEM(data) + } + tr.TLSClientConfig.RootCAs = rootCAs + } + } + return tr, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go new file mode 100644 index 000000000000..6c8dc9433934 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -0,0 +1,864 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "crypto/md5" + "crypto/sha256" + "crypto/tls" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "errors" + "fmt" + "hash" + "io" + "math/rand" + "mime" + "net" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "sync" + "time" + + md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +func trimEtag(etag string) string { + etag = strings.TrimPrefix(etag, "\"") + return strings.TrimSuffix(etag, "\"") +} + +var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`) + +func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { + if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 { + expTime, err := parseRFC7231Time(matches[1]) + if err != nil { + return time.Time{}, "" + } + return expTime, matches[2] + } + return time.Time{}, "" +} + +var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`) + +func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) { + matches := restoreRegex.FindStringSubmatch(restore) + if len(matches) != 4 { + return false, time.Time{}, errors.New("unexpected restore header") + } + ongoing, err = strconv.ParseBool(matches[1]) + if err != nil { + return false, time.Time{}, err + } + if matches[3] != "" { + expTime, err = parseRFC7231Time(matches[3]) + if err != nil { + return false, time.Time{}, err + } + } + return ongoing, expTime, err +} + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// sum256 calculate sha256sum for an input byte array, returns hex encoded. +func sum256Hex(data []byte) string { + hash := newSHA256Hasher() + defer hash.Close() + hash.Write(data) + return hex.EncodeToString(hash.Sum(nil)) +} + +// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. +func sumMD5Base64(data []byte) string { + hash := newMd5Hasher() + defer hash.Close() + hash.Write(data) + return base64.StdEncoding.EncodeToString(hash.Sum(nil)) +} + +// getEndpointURL - construct a new endpoint. +func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { + // If secure is false, use 'http' scheme. + scheme := "https" + if !secure { + scheme = "http" + } + + // Construct a secured endpoint URL. + endpointURLStr := scheme + "://" + endpoint + endpointURL, err := url.Parse(endpointURLStr) + if err != nil { + return nil, err + } + + // Validate incoming endpoint URL. + if err := isValidEndpointURL(*endpointURL); err != nil { + return nil, err + } + return endpointURL, nil +} + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. + // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +var ( + // Hex encoded string of nil sha256sum bytes. + emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Sentinel URL is the default url value which is invalid. + sentinelURL = url.URL{} +) + +// Verify if input endpoint URL is valid. +func isValidEndpointURL(endpointURL url.URL) error { + if endpointURL == sentinelURL { + return errInvalidArgument("Endpoint url cannot be empty.") + } + if endpointURL.Path != "/" && endpointURL.Path != "" { + return errInvalidArgument("Endpoint url cannot have fully qualified paths.") + } + host := endpointURL.Hostname() + if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { + msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards." + return errInvalidArgument(msg) + } + + if strings.Contains(host, ".s3.amazonaws.com") { + if !s3utils.IsAmazonEndpoint(endpointURL) { + return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") + } + } + if strings.Contains(host, ".googleapis.com") { + if !s3utils.IsGoogleEndpoint(endpointURL) { + return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") + } + } + return nil +} + +// Verify if input expires value is valid. +func isValidExpiry(expires time.Duration) error { + expireSeconds := int64(expires / time.Second) + if expireSeconds < 1 { + return errInvalidArgument("Expires cannot be lesser than 1 second.") + } + if expireSeconds > 604800 { + return errInvalidArgument("Expires cannot be greater than 7 days.") + } + return nil +} + +// Extract only necessary metadata header key/values by +// filtering them out with a list of custom header keys. +func extractObjMetadata(header http.Header) http.Header { + preserveKeys := []string{ + "Content-Type", + "Cache-Control", + "Content-Encoding", + "Content-Language", + "Content-Disposition", + "X-Amz-Storage-Class", + "X-Amz-Object-Lock-Mode", + "X-Amz-Object-Lock-Retain-Until-Date", + "X-Amz-Object-Lock-Legal-Hold", + "X-Amz-Website-Redirect-Location", + "X-Amz-Server-Side-Encryption", + "X-Amz-Tagging-Count", + "X-Amz-Meta-", + "X-Minio-Meta-", + // Add new headers to be preserved. + // if you add new headers here, please extend + // PutObjectOptions{} to preserve them + // upon upload as well. + } + filteredHeader := make(http.Header) + for k, v := range header { + var found bool + for _, prefix := range preserveKeys { + if !strings.HasPrefix(k, prefix) { + continue + } + found = true + if prefix == "X-Amz-Meta-" || prefix == "X-Minio-Meta-" { + for index, val := range v { + if strings.HasPrefix(val, "=?") { + decoder := mime.WordDecoder{} + if decoded, err := decoder.DecodeHeader(val); err == nil { + v[index] = decoded + } + } + } + } + break + } + if found { + filteredHeader[k] = v + } + } + return filteredHeader +} + +const ( + // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT + rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" +) + +func parseTime(t string, formats ...string) (time.Time, error) { + for _, format := range formats { + tt, err := time.Parse(format, t) + if err == nil { + return tt, nil + } + } + return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats) +} + +func parseRFC7231Time(lastModified string) (time.Time, error) { + return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear) +} + +// ToObjectInfo converts http header values into ObjectInfo type, +// extracts metadata and fills in all the necessary fields in ObjectInfo. +func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) { + var err error + // Trim off the odd double quotes from ETag in the beginning and end. + etag := trimEtag(h.Get("ETag")) + + // Parse content length is exists + var size int64 = -1 + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + size, err = strconv.ParseInt(contentLengthStr, 10, 64) + if err != nil { + // Content-Length is not valid + return ObjectInfo{}, ErrorResponse{ + Code: InternalError, + Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + } + + // Parse Last-Modified has http time format. + mtime, err := parseRFC7231Time(h.Get("Last-Modified")) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: InternalError, + Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + + // Fetch content type if any present. + contentType := strings.TrimSpace(h.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + + expiryStr := h.Get("Expires") + var expiry time.Time + if expiryStr != "" { + expiry, err = parseRFC7231Time(expiryStr) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: InternalError, + Message: fmt.Sprintf("'Expiry' is not in supported format: %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + } + + metadata := extractObjMetadata(h) + userMetadata := make(map[string]string) + for k, v := range metadata { + if strings.HasPrefix(k, "X-Amz-Meta-") { + userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0] + } + } + + userTags, err := tags.ParseObjectTags(h.Get(amzTaggingHeader)) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: InternalError, + } + } + + var tagCount int + if count := h.Get(amzTaggingCount); count != "" { + tagCount, err = strconv.Atoi(count) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: InternalError, + Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + } + + // Nil if not found + var restore *RestoreInfo + if restoreHdr := h.Get(amzRestore); restoreHdr != "" { + ongoing, expTime, err := amzRestoreToStruct(restoreHdr) + if err != nil { + return ObjectInfo{}, err + } + restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime} + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration)) + + deleteMarker := h.Get(amzDeleteMarker) == "true" + + // Save object metadata info. + return ObjectInfo{ + ETag: etag, + Key: objectName, + Size: size, + LastModified: mtime, + ContentType: contentType, + Expires: expiry, + VersionID: h.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + ReplicationStatus: h.Get(amzReplicationStatus), + Expiration: expTime, + ExpirationRuleID: ruleID, + // Extract only the relevant header keys describing the object. + // following function filters out a list of standard set of keys + // which are not part of object metadata. + Metadata: metadata, + UserMetadata: userMetadata, + UserTags: userTags.ToMap(), + UserTagCount: tagCount, + Restore: restore, + + // Checksum values + ChecksumCRC32: h.Get(ChecksumCRC32.Key()), + ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()), + ChecksumSHA1: h.Get(ChecksumSHA1.Key()), + ChecksumSHA256: h.Get(ChecksumSHA256.Key()), + ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()), + ChecksumMode: h.Get(ChecksumFullObjectMode.Key()), + }, nil +} + +var readFull = func(r io.Reader, buf []byte) (n int, err error) { + // ReadFull reads exactly len(buf) bytes from r into buf. + // It returns the number of bytes copied and an error if + // fewer bytes were read. The error is EOF only if no bytes + // were read. If an EOF happens after reading some but not + // all the bytes, ReadFull returns ErrUnexpectedEOF. + // On return, n == len(buf) if and only if err == nil. + // If r returns an error having read at least len(buf) bytes, + // the error is dropped. + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + // Some spurious io.Reader's return + // io.ErrUnexpectedEOF when nn == 0 + // this behavior is undocumented + // so we are on purpose not using io.ReadFull + // implementation because this can lead + // to custom handling, to avoid that + // we simply modify the original io.ReadFull + // implementation to avoid this issue. + // io.ErrUnexpectedEOF with nn == 0 really + // means that io.EOF + if err == io.ErrUnexpectedEOF && nn == 0 { + err = io.EOF + } + n += nn + } + if n >= len(buf) { + err = nil + } else if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err +} + +// regCred matches credential string in HTTP header +var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") + +// regCred matches signature string in HTTP header +var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") + +// Redact out signature value from authorization string. +func redactSignature(origAuth string) string { + if !strings.HasPrefix(origAuth, signV4Algorithm) { + // Set a temporary redacted auth + return "AWS **REDACTED**:**REDACTED**" + } + + // Signature V4 authorization header. + + // Strip out accessKeyID from: + // Credential=////aws4_request + newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") + + // Strip out 256-bit signature from: Signature=<256-bit signature> + return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") +} + +// Get default location returns the location based on the input +// URL `u`, if region override is provided then all location +// defaults to regionOverride. +// +// If no other cases match then the location is set to `us-east-1` +// as a last resort. +func getDefaultLocation(u url.URL, regionOverride string) (location string) { + if regionOverride != "" { + return regionOverride + } + region := s3utils.GetRegionFromURL(u) + if region == "" { + region = "us-east-1" + } + return region +} + +var supportedHeaders = map[string]bool{ + "content-type": true, + "cache-control": true, + "content-encoding": true, + "content-disposition": true, + "content-language": true, + "x-amz-website-redirect-location": true, + "x-amz-object-lock-mode": true, + "x-amz-metadata-directive": true, + "x-amz-object-lock-retain-until-date": true, + "expires": true, + "x-amz-replication-status": true, + // Add more supported headers here. + // Must be lower case. +} + +// isStorageClassHeader returns true if the header is a supported storage class header +func isStorageClassHeader(headerKey string) bool { + return strings.EqualFold(amzStorageClass, headerKey) +} + +// isStandardHeader returns true if header is a supported header and not a custom header +func isStandardHeader(headerKey string) bool { + return supportedHeaders[strings.ToLower(headerKey)] +} + +// sseHeaders is list of server side encryption headers +var sseHeaders = map[string]bool{ + "x-amz-server-side-encryption": true, + "x-amz-server-side-encryption-aws-kms-key-id": true, + "x-amz-server-side-encryption-context": true, + "x-amz-server-side-encryption-customer-algorithm": true, + "x-amz-server-side-encryption-customer-key": true, + "x-amz-server-side-encryption-customer-key-md5": true, + // Add more supported headers here. + // Must be lower case. +} + +// isSSEHeader returns true if header is a server side encryption header. +func isSSEHeader(headerKey string) bool { + return sseHeaders[strings.ToLower(headerKey)] +} + +// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. +func isAmzHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + + return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") +} + +// isMinioHeader returns true if header is x-minio- header. +func isMinioHeader(headerKey string) bool { + return strings.HasPrefix(strings.ToLower(headerKey), "x-minio-") +} + +// supportedQueryValues is a list of query strings that can be passed in when using GetObject. +var supportedQueryValues = map[string]bool{ + "attributes": true, + "partNumber": true, + "versionId": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "response-content-language": true, + "response-content-type": true, + "response-expires": true, +} + +// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized. +func isStandardQueryValue(qsKey string) bool { + return supportedQueryValues[qsKey] +} + +// Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the +// set of query params starting with "x-" are ignored by S3. +const allowedCustomQueryPrefix = "x-" + +func isCustomQueryValue(qsKey string) bool { + return strings.HasPrefix(qsKey, allowedCustomQueryPrefix) +} + +var ( + md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} + sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} +) + +func newMd5Hasher() md5simd.Hasher { + return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true} +} + +func newSHA256Hasher() md5simd.Hasher { + return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true} +} + +// hashWrapper implements the md5simd.Hasher interface. +type hashWrapper struct { + hash.Hash + isMD5 bool + isSHA256 bool +} + +// Close will put the hasher back into the pool. +func (m *hashWrapper) Close() { + if m.isMD5 && m.Hash != nil { + m.Reset() + md5Pool.Put(m.Hash) + } + if m.isSHA256 && m.Hash != nil { + m.Reset() + sha256Pool.Put(m.Hash) + } + m.Hash = nil +} + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +// IsNetworkOrHostDown - if there was a network error or if the host is down. +// expectTimeouts indicates that *context* timeouts are expected and does not +// indicate a downed host. Other timeouts still returns down. +func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { + if err == nil { + return false + } + + if errors.Is(err, context.Canceled) { + return false + } + + if expectTimeouts && errors.Is(err, context.DeadlineExceeded) { + return false + } + + if errors.Is(err, context.DeadlineExceeded) { + return true + } + + // We need to figure if the error either a timeout + // or a non-temporary error. + urlErr := &url.Error{} + if errors.As(err, &urlErr) { + switch urlErr.Err.(type) { + case *net.DNSError, *net.OpError, net.UnknownNetworkError, *tls.CertificateVerificationError: + return true + } + } + var e net.Error + if errors.As(err, &e) { + if e.Timeout() { + return true + } + } + + // Fallback to other mechanisms. + switch { + case strings.Contains(err.Error(), "Connection closed by foreign host"): + return true + case strings.Contains(err.Error(), "TLS handshake timeout"): + // If error is - tlsHandshakeTimeoutError. + return true + case strings.Contains(err.Error(), "i/o timeout"): + // If error is - tcp timeoutError. + return true + case strings.Contains(err.Error(), "connection timed out"): + // If err is a net.Dial timeout. + return true + case strings.Contains(err.Error(), "connection refused"): + // If err is connection refused + return true + case strings.Contains(err.Error(), "server gave HTTP response to HTTPS client"): + // If err is TLS client is used with HTTP server + return true + case strings.Contains(err.Error(), "Client sent an HTTP request to an HTTPS server"): + // If err is plain-text Client is used with a HTTPS server + return true + case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): + // Denial errors + return true + } + return false +} + +// newHashReaderWrapper will hash all reads done through r. +// When r returns io.EOF the done function will be called with the sum. +func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { + return &hashReaderWrapper{ + r: r, + h: h, + done: done, + } +} + +type hashReaderWrapper struct { + r io.Reader + h hash.Hash + done func(hash []byte) +} + +// Read implements the io.Reader interface. +func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { + n, err = h.r.Read(p) + if n > 0 { + n2, err := h.h.Write(p[:n]) + if err != nil { + return 0, err + } + if n2 != n { + return 0, io.ErrShortWrite + } + } + if err == io.EOF { + // Call back + h.done(h.h.Sum(nil)) + } + return n, err +} + +// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration. +// Used uint for unsigned long. Used uint32 for input arguments in order to match +// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib) +// Modified for hash/crc64 by Klaus Post, 2024. +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + + for vec != 0 { + if vec&1 != 0 { + sum ^= mat[0] + } + vec >>= 1 + mat = mat[1:] + } + return sum +} + +func gf2MatrixSquare(square, mat []uint64) { + if len(square) != len(mat) { + panic("square matrix size mismatch") + } + for n := range mat { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32 +// hash values crc1 and crc2. poly represents the generator polynomial +// and len2 specifies the byte length that the crc2 hash covers. +func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 { + // degenerate case (also disallow negative lengths) + if len2 <= 0 { + return crc1 + } + + even := make([]uint64, 32) // even-power-of-two zeros operator + odd := make([]uint64, 32) // odd-power-of-two zeros operator + + // put operator for one zero bit in odd + odd[0] = uint64(poly) // CRC-32 polynomial + row := uint64(1) + for n := 1; n < 32; n++ { + odd[n] = row + row <<= 1 + } + + // put operator for two zero bits in even + gf2MatrixSquare(even, odd) + + // put operator for four zero bits in odd + gf2MatrixSquare(odd, even) + + // apply len2 zeros to crc1 (first square will put the operator for one + // zero byte, eight zero bits, in even) + crc1n := uint64(crc1) + for { + // apply zeros operator for this bit of len2 + gf2MatrixSquare(even, odd) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(even, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + + // another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd, even) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(odd, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + } + + // return combined crc + crc1n ^= uint64(crc2) + return uint32(crc1n) +} + +func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 { + // degenerate case (also disallow negative lengths) + if len2 <= 0 { + return crc1 + } + + even := make([]uint64, 64) // even-power-of-two zeros operator + odd := make([]uint64, 64) // odd-power-of-two zeros operator + + // put operator for one zero bit in odd + odd[0] = poly // CRC-64 polynomial + row := uint64(1) + for n := 1; n < 64; n++ { + odd[n] = row + row <<= 1 + } + + // put operator for two zero bits in even + gf2MatrixSquare(even, odd) + + // put operator for four zero bits in odd + gf2MatrixSquare(odd, even) + + // apply len2 zeros to crc1 (first square will put the operator for one + // zero byte, eight zero bits, in even) + crc1n := crc1 + for { + // apply zeros operator for this bit of len2 + gf2MatrixSquare(even, odd) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(even, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + + // another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd, even) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(odd, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + } + + // return combined crc + crc1n ^= crc2 + return crc1n +} diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md new file mode 100644 index 000000000000..1ac6a81f6aed --- /dev/null +++ b/vendor/github.com/philhofer/fwd/LICENSE.md @@ -0,0 +1,7 @@ +Copyright (c) 2014-2015, Philip Hofer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md new file mode 100644 index 000000000000..4e9952342697 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/README.md @@ -0,0 +1,368 @@ + +# fwd + +[![Go Reference](https://pkg.go.dev/badge/github.com/philhofer/fwd.svg)](https://pkg.go.dev/github.com/philhofer/fwd) + + +`import "github.com/philhofer/fwd"` + +* [Overview](#pkg-overview) +* [Index](#pkg-index) + +## Overview +Package fwd provides a buffered reader +and writer. Each has methods that help improve +the encoding/decoding performance of some binary +protocols. + +The `Writer` and `Reader` type provide similar +functionality to their counterparts in `bufio`, plus +a few extra utility methods that simplify read-ahead +and write-ahead. I wrote this package to improve serialization +performance for [github.com/tinylib/msgp](https://github.com/tinylib/msgp), +where it provided about a 2x speedup over `bufio` for certain +workloads. However, care must be taken to understand the semantics of the +extra methods provided by this package, as they allow +the user to access and manipulate the buffer memory +directly. + +The extra methods for `fwd.Reader` are `Peek`, `Skip` +and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +will re-allocate the read buffer in order to accommodate arbitrarily +large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +in the stream, and uses the `io.Seeker` interface if the underlying +stream implements it. `(*fwd.Reader).Next` returns a slice pointing +to the next `n` bytes in the read buffer (like `Peek`), but also +increments the read position. This allows users to process streams +in arbitrary block sizes without having to manage appropriately-sized +slices. Additionally, obviating the need to copy the data from the +buffer to another location in memory can improve performance dramatically +in CPU-bound applications. + +`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +returns a slice pointing to the next `n` bytes of the writer, and increments +the write position by the length of the returned slice. This allows users +to write directly to the end of the buffer. + + +## Portability + +Because it uses the unsafe package, there are theoretically +no promises about forward or backward portability. + +To stay compatible with tinygo 0.32, unsafestr() has been updated +to use unsafe.Slice() as suggested by +https://tinygo.org/docs/guides/compatibility, which also required +bumping go.mod to require at least go 1.20. + + +## Index +* [Constants](#pkg-constants) +* [type Reader](#Reader) + * [func NewReader(r io.Reader) *Reader](#NewReader) + * [func NewReaderBuf(r io.Reader, buf []byte) *Reader](#NewReaderBuf) + * [func NewReaderSize(r io.Reader, n int) *Reader](#NewReaderSize) + * [func (r *Reader) BufferSize() int](#Reader.BufferSize) + * [func (r *Reader) Buffered() int](#Reader.Buffered) + * [func (r *Reader) Next(n int) ([]byte, error)](#Reader.Next) + * [func (r *Reader) Peek(n int) ([]byte, error)](#Reader.Peek) + * [func (r *Reader) Read(b []byte) (int, error)](#Reader.Read) + * [func (r *Reader) ReadByte() (byte, error)](#Reader.ReadByte) + * [func (r *Reader) ReadFull(b []byte) (int, error)](#Reader.ReadFull) + * [func (r *Reader) Reset(rd io.Reader)](#Reader.Reset) + * [func (r *Reader) Skip(n int) (int, error)](#Reader.Skip) + * [func (r *Reader) WriteTo(w io.Writer) (int64, error)](#Reader.WriteTo) +* [type Writer](#Writer) + * [func NewWriter(w io.Writer) *Writer](#NewWriter) + * [func NewWriterBuf(w io.Writer, buf []byte) *Writer](#NewWriterBuf) + * [func NewWriterSize(w io.Writer, n int) *Writer](#NewWriterSize) + * [func (w *Writer) BufferSize() int](#Writer.BufferSize) + * [func (w *Writer) Buffered() int](#Writer.Buffered) + * [func (w *Writer) Flush() error](#Writer.Flush) + * [func (w *Writer) Next(n int) ([]byte, error)](#Writer.Next) + * [func (w *Writer) ReadFrom(r io.Reader) (int64, error)](#Writer.ReadFrom) + * [func (w *Writer) Write(p []byte) (int, error)](#Writer.Write) + * [func (w *Writer) WriteByte(b byte) error](#Writer.WriteByte) + * [func (w *Writer) WriteString(s string) (int, error)](#Writer.WriteString) + + +## Constants +``` go +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 +) +``` +``` go +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 +) +``` + + + +## type Reader +``` go +type Reader struct { + // contains filtered or unexported fields +} +``` +Reader is a buffered look-ahead reader + + + + + + + + + +### func NewReader +``` go +func NewReader(r io.Reader) *Reader +``` +NewReader returns a new *Reader that reads from 'r' + + +### func NewReaderSize +``` go +func NewReaderSize(r io.Reader, n int) *Reader +``` +NewReaderSize returns a new *Reader that +reads from 'r' and has a buffer size 'n' + + + + +### func (\*Reader) BufferSize +``` go +func (r *Reader) BufferSize() int +``` +BufferSize returns the total size of the buffer + + + +### func (\*Reader) Buffered +``` go +func (r *Reader) Buffered() int +``` +Buffered returns the number of bytes currently in the buffer + + + +### func (\*Reader) Next +``` go +func (r *Reader) Next(n int) ([]byte, error) +``` +Next returns the next 'n' bytes in the stream. +Unlike Peek, Next advances the reader position. +The returned bytes point to the same +data as the buffer, so the slice is +only valid until the next reader method call. +An EOF is considered an unexpected error. +If an the returned slice is less than the +length asked for, an error will be returned, +and the reader position will not be incremented. + + + +### func (\*Reader) Peek +``` go +func (r *Reader) Peek(n int) ([]byte, error) +``` +Peek returns the next 'n' buffered bytes, +reading from the underlying reader if necessary. +It will only return a slice shorter than 'n' bytes +if it also returns an error. Peek does not advance +the reader. EOF errors are *not* returned as +io.ErrUnexpectedEOF. + + + +### func (\*Reader) Read +``` go +func (r *Reader) Read(b []byte) (int, error) +``` +Read implements `io.Reader`. + + + +### func (\*Reader) ReadByte +``` go +func (r *Reader) ReadByte() (byte, error) +``` +ReadByte implements `io.ByteReader`. + + + +### func (\*Reader) ReadFull +``` go +func (r *Reader) ReadFull(b []byte) (int, error) +``` +ReadFull attempts to read len(b) bytes into +'b'. It returns the number of bytes read into +'b', and an error if it does not return len(b). +EOF is considered an unexpected error. + + + +### func (\*Reader) Reset +``` go +func (r *Reader) Reset(rd io.Reader) +``` +Reset resets the underlying reader +and the read buffer. + + + +### func (\*Reader) Skip +``` go +func (r *Reader) Skip(n int) (int, error) +``` +Skip moves the reader forward 'n' bytes. +Returns the number of bytes skipped and any +errors encountered. It is analogous to Seek(n, 1). +If the underlying reader implements io.Seeker, then +that method will be used to skip forward. + +If the reader encounters +an EOF before skipping 'n' bytes, it +returns `io.ErrUnexpectedEOF`. If the +underlying reader implements `io.Seeker`, then +those rules apply instead. (Many implementations +will not return `io.EOF` until the next call +to Read). + + + + +### func (\*Reader) WriteTo +``` go +func (r *Reader) WriteTo(w io.Writer) (int64, error) +``` +WriteTo implements `io.WriterTo`. + + + + +## type Writer +``` go +type Writer struct { + // contains filtered or unexported fields +} + +``` +Writer is a buffered writer + + + + + + + +### func NewWriter +``` go +func NewWriter(w io.Writer) *Writer +``` +NewWriter returns a new writer +that writes to 'w' and has a buffer +that is `DefaultWriterSize` bytes. + + +### func NewWriterBuf +``` go +func NewWriterBuf(w io.Writer, buf []byte) *Writer +``` +NewWriterBuf returns a new writer +that writes to 'w' and has 'buf' as a buffer. +'buf' is not used when has smaller capacity than 18, +custom buffer is allocated instead. + + +### func NewWriterSize +``` go +func NewWriterSize(w io.Writer, n int) *Writer +``` +NewWriterSize returns a new writer that +writes to 'w' and has a buffer size 'n'. + +### func (\*Writer) BufferSize +``` go +func (w *Writer) BufferSize() int +``` +BufferSize returns the maximum size of the buffer. + + + +### func (\*Writer) Buffered +``` go +func (w *Writer) Buffered() int +``` +Buffered returns the number of buffered bytes +in the reader. + + + +### func (\*Writer) Flush +``` go +func (w *Writer) Flush() error +``` +Flush flushes any buffered bytes +to the underlying writer. + + + +### func (\*Writer) Next +``` go +func (w *Writer) Next(n int) ([]byte, error) +``` +Next returns the next 'n' free bytes +in the write buffer, flushing the writer +as necessary. Next will return `io.ErrShortBuffer` +if 'n' is greater than the size of the write buffer. +Calls to 'next' increment the write position by +the size of the returned buffer. + + + +### func (\*Writer) ReadFrom +``` go +func (w *Writer) ReadFrom(r io.Reader) (int64, error) +``` +ReadFrom implements `io.ReaderFrom` + + + +### func (\*Writer) Write +``` go +func (w *Writer) Write(p []byte) (int, error) +``` +Write implements `io.Writer` + + + +### func (\*Writer) WriteByte +``` go +func (w *Writer) WriteByte(b byte) error +``` +WriteByte implements `io.ByteWriter` + + + +### func (\*Writer) WriteString +``` go +func (w *Writer) WriteString(s string) (int, error) +``` +WriteString is analogous to Write, but it takes a string. + + + + + + + + +- - - +Generated by [godoc2md](https://github.com/davecheney/godoc2md) diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go new file mode 100644 index 000000000000..a24a896e2bc4 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/reader.go @@ -0,0 +1,445 @@ +// Package fwd provides a buffered reader +// and writer. Each has methods that help improve +// the encoding/decoding performance of some binary +// protocols. +// +// The [Writer] and [Reader] type provide similar +// functionality to their counterparts in [bufio], plus +// a few extra utility methods that simplify read-ahead +// and write-ahead. I wrote this package to improve serialization +// performance for http://github.com/tinylib/msgp, +// where it provided about a 2x speedup over `bufio` for certain +// workloads. However, care must be taken to understand the semantics of the +// extra methods provided by this package, as they allow +// the user to access and manipulate the buffer memory +// directly. +// +// The extra methods for [Reader] are [Reader.Peek], [Reader.Skip] +// and [Reader.Next]. (*fwd.Reader).Peek, unlike (*bufio.Reader).Peek, +// will re-allocate the read buffer in order to accommodate arbitrarily +// large read-ahead. (*fwd.Reader).Skip skips the next 'n' bytes +// in the stream, and uses the [io.Seeker] interface if the underlying +// stream implements it. (*fwd.Reader).Next returns a slice pointing +// to the next 'n' bytes in the read buffer (like Reader.Peek), but also +// increments the read position. This allows users to process streams +// in arbitrary block sizes without having to manage appropriately-sized +// slices. Additionally, obviating the need to copy the data from the +// buffer to another location in memory can improve performance dramatically +// in CPU-bound applications. +// +// [Writer] only has one extra method, which is (*fwd.Writer).Next, which +// returns a slice pointing to the next 'n' bytes of the writer, and increments +// the write position by the length of the returned slice. This allows users +// to write directly to the end of the buffer. +package fwd + +import ( + "io" + "os" +) + +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 + + // minimum read buffer; straight from bufio + minReaderSize = 16 +) + +// NewReader returns a new *Reader that reads from 'r' +func NewReader(r io.Reader) *Reader { + return NewReaderSize(r, DefaultReaderSize) +} + +// NewReaderSize returns a new *Reader that +// reads from 'r' and has a buffer size 'n'. +func NewReaderSize(r io.Reader, n int) *Reader { + buf := make([]byte, 0, max(n, minReaderSize)) + return NewReaderBuf(r, buf) +} + +// NewReaderBuf returns a new *Reader that +// reads from 'r' and uses 'buf' as a buffer. +// 'buf' is not used when has smaller capacity than 16, +// custom buffer is allocated instead. +func NewReaderBuf(r io.Reader, buf []byte) *Reader { + if cap(buf) < minReaderSize { + buf = make([]byte, 0, minReaderSize) + } + buf = buf[:0] + rd := &Reader{ + r: r, + data: buf, + } + if s, ok := r.(io.Seeker); ok { + rd.rs = s + } + return rd +} + +// Reader is a buffered look-ahead reader +type Reader struct { + r io.Reader // underlying reader + + // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space + data []byte // data + n int // read offset + inputOffset int64 // offset in the input stream + state error // last read error + + // if the reader past to NewReader was + // also an io.Seeker, this is non-nil + rs io.Seeker +} + +// Reset resets the underlying reader +// and the read buffer. +func (r *Reader) Reset(rd io.Reader) { + r.r = rd + r.data = r.data[0:0] + r.n = 0 + r.inputOffset = 0 + r.state = nil + if s, ok := rd.(io.Seeker); ok { + r.rs = s + } else { + r.rs = nil + } +} + +// more() does one read on the underlying reader +func (r *Reader) more() { + // move data backwards so that + // the read offset is 0; this way + // we can supply the maximum number of + // bytes to the reader + if r.n != 0 { + if r.n < len(r.data) { + r.data = r.data[:copy(r.data[0:], r.data[r.n:])] + } else { + r.data = r.data[:0] + } + r.n = 0 + } + var a int + a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) + if a == 0 && r.state == nil { + r.state = io.ErrNoProgress + return + } else if a > 0 && r.state == io.EOF { + // discard the io.EOF if we read more than 0 bytes. + // the next call to Read should return io.EOF again. + r.state = nil + } else if r.state != nil { + return + } + r.data = r.data[:len(r.data)+a] +} + +// pop error +func (r *Reader) err() (e error) { + e, r.state = r.state, nil + return +} + +// pop error; EOF -> io.ErrUnexpectedEOF +func (r *Reader) noEOF() (e error) { + e, r.state = r.state, nil + if e == io.EOF { + e = io.ErrUnexpectedEOF + } + return +} + +// buffered bytes +func (r *Reader) buffered() int { return len(r.data) - r.n } + +// Buffered returns the number of bytes currently in the buffer +func (r *Reader) Buffered() int { return len(r.data) - r.n } + +// BufferSize returns the total size of the buffer +func (r *Reader) BufferSize() int { return cap(r.data) } + +// InputOffset returns the input stream byte offset of the current reader position +func (r *Reader) InputOffset() int64 { return r.inputOffset } + +// Peek returns the next 'n' buffered bytes, +// reading from the underlying reader if necessary. +// It will only return a slice shorter than 'n' bytes +// if it also returns an error. Peek does not advance +// the reader. EOF errors are *not* returned as +// io.ErrUnexpectedEOF. +func (r *Reader) Peek(n int) ([]byte, error) { + // in the degenerate case, + // we may need to realloc + // (the caller asked for more + // bytes than the size of the buffer) + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // keep filling until + // we hit an error or + // read enough bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + // we must have hit an error + if r.buffered() < n { + return r.data[r.n:], r.err() + } + + return r.data[r.n : r.n+n], nil +} + +func (r *Reader) PeekByte() (b byte, err error) { + if len(r.data)-r.n >= 1 { + b = r.data[r.n] + } else { + b, err = r.peekByte() + } + return +} + +func (r *Reader) peekByte() (byte, error) { + const n = 1 + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // keep filling until + // we hit an error or + // read enough bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + // we must have hit an error + if r.buffered() < n { + return 0, r.err() + } + return r.data[r.n], nil +} + +// discard(n) discards up to 'n' buffered bytes, and +// and returns the number of bytes discarded +func (r *Reader) discard(n int) int { + inbuf := r.buffered() + if inbuf <= n { + r.n = 0 + r.inputOffset += int64(inbuf) + r.data = r.data[:0] + return inbuf + } + r.n += n + r.inputOffset += int64(n) + return n +} + +// Skip moves the reader forward 'n' bytes. +// Returns the number of bytes skipped and any +// errors encountered. It is analogous to Seek(n, 1). +// If the underlying reader implements io.Seeker, then +// that method will be used to skip forward. +// +// If the reader encounters +// an EOF before skipping 'n' bytes, it +// returns [io.ErrUnexpectedEOF]. If the +// underlying reader implements [io.Seeker], then +// those rules apply instead. (Many implementations +// will not return [io.EOF] until the next call +// to Read). +func (r *Reader) Skip(n int) (int, error) { + if n < 0 { + return 0, os.ErrInvalid + } + + // discard some or all of the current buffer + skipped := r.discard(n) + + // if we can Seek() through the remaining bytes, do that + if n > skipped && r.rs != nil { + nn, err := r.rs.Seek(int64(n-skipped), 1) + r.inputOffset += nn + return int(nn) + skipped, err + } + // otherwise, keep filling the buffer + // and discarding it up to 'n' + for skipped < n && r.state == nil { + r.more() + skipped += r.discard(n - skipped) + } + return skipped, r.noEOF() +} + +// Next returns the next 'n' bytes in the stream. +// Unlike Peek, Next advances the reader position. +// The returned bytes point to the same +// data as the buffer, so the slice is +// only valid until the next reader method call. +// An EOF is considered an unexpected error. +// If an the returned slice is less than the +// length asked for, an error will be returned, +// and the reader position will not be incremented. +func (r *Reader) Next(n int) (b []byte, err error) { + if r.state == nil && len(r.data)-r.n >= n { + b = r.data[r.n : r.n+n] + r.n += n + r.inputOffset += int64(n) + } else { + b, err = r.next(n) + } + return +} + +func (r *Reader) next(n int) ([]byte, error) { + // in case the buffer is too small + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // fill at least 'n' bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + if r.buffered() < n { + return r.data[r.n:], r.noEOF() + } + out := r.data[r.n : r.n+n] + r.n += n + r.inputOffset += int64(n) + return out, nil +} + +// Read implements [io.Reader]. +func (r *Reader) Read(b []byte) (int, error) { + // if we have data in the buffer, just + // return that. + if r.buffered() != 0 { + x := copy(b, r.data[r.n:]) + r.n += x + r.inputOffset += int64(x) + return x, nil + } + var n int + // we have no buffered data; determine + // whether or not to buffer or call + // the underlying reader directly + if len(b) >= cap(r.data) { + n, r.state = r.r.Read(b) + } else { + r.more() + n = copy(b, r.data) + r.n = n + } + if n == 0 { + return 0, r.err() + } + + r.inputOffset += int64(n) + + return n, nil +} + +// ReadFull attempts to read len(b) bytes into +// 'b'. It returns the number of bytes read into +// 'b', and an error if it does not return len(b). +// EOF is considered an unexpected error. +func (r *Reader) ReadFull(b []byte) (int, error) { + var n int // read into b + var nn int // scratch + l := len(b) + // either read buffered data, + // or read directly for the underlying + // buffer, or fetch more buffered data. + for n < l && r.state == nil { + if r.buffered() != 0 { + nn = copy(b[n:], r.data[r.n:]) + n += nn + r.n += nn + r.inputOffset += int64(nn) + } else if l-n > cap(r.data) { + nn, r.state = r.r.Read(b[n:]) + n += nn + r.inputOffset += int64(nn) + } else { + r.more() + } + } + if n < l { + return n, r.noEOF() + } + return n, nil +} + +// ReadByte implements [io.ByteReader]. +func (r *Reader) ReadByte() (byte, error) { + for r.buffered() < 1 && r.state == nil { + r.more() + } + if r.buffered() < 1 { + return 0, r.err() + } + b := r.data[r.n] + r.n++ + r.inputOffset++ + + return b, nil +} + +// WriteTo implements [io.WriterTo]. +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + var ( + i int64 + ii int + err error + ) + // first, clear buffer + if r.buffered() > 0 { + ii, err = w.Write(r.data[r.n:]) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + r.inputOffset += int64(ii) + } + for r.state == nil { + // here we just do + // 1:1 reads and writes + r.more() + if r.buffered() > 0 { + ii, err = w.Write(r.data) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + r.inputOffset += int64(ii) + } + } + if r.state != io.EOF { + return i, r.err() + } + return i, nil +} + +func max(a int, b int) int { + if a < b { + return b + } + return a +} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go new file mode 100644 index 000000000000..4d6ea15b3340 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer.go @@ -0,0 +1,236 @@ +package fwd + +import "io" + +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 + + minWriterSize = minReaderSize +) + +// Writer is a buffered writer +type Writer struct { + w io.Writer // writer + buf []byte // 0:len(buf) is bufered data +} + +// NewWriter returns a new writer +// that writes to 'w' and has a buffer +// that is `DefaultWriterSize` bytes. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, DefaultWriterSize), + } +} + +// NewWriterSize returns a new writer that +// writes to 'w' and has a buffer size 'n'. +func NewWriterSize(w io.Writer, n int) *Writer { + if wr, ok := w.(*Writer); ok && cap(wr.buf) >= n { + return wr + } + buf := make([]byte, 0, max(n, minWriterSize)) + return NewWriterBuf(w, buf) +} + +// NewWriterBuf returns a new writer +// that writes to 'w' and has 'buf' as a buffer. +// 'buf' is not used when has smaller capacity than 18, +// custom buffer is allocated instead. +func NewWriterBuf(w io.Writer, buf []byte) *Writer { + if cap(buf) < minWriterSize { + buf = make([]byte, 0, minWriterSize) + } + buf = buf[:0] + return &Writer{ + w: w, + buf: buf, + } +} + +// Buffered returns the number of buffered bytes +// in the reader. +func (w *Writer) Buffered() int { return len(w.buf) } + +// BufferSize returns the maximum size of the buffer. +func (w *Writer) BufferSize() int { return cap(w.buf) } + +// Flush flushes any buffered bytes +// to the underlying writer. +func (w *Writer) Flush() error { + l := len(w.buf) + if l > 0 { + n, err := w.w.Write(w.buf) + + // if we didn't write the whole + // thing, copy the unwritten + // bytes to the beginnning of the + // buffer. + if n < l && n > 0 { + w.pushback(n) + if err == nil { + err = io.ErrShortWrite + } + } + if err != nil { + return err + } + w.buf = w.buf[:0] + return nil + } + return nil +} + +// Write implements `io.Writer` +func (w *Writer) Write(p []byte) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(p) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + if c < ln { + return w.w.Write(p) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], p), nil +} + +// WriteString is analogous to Write, but it takes a string. +func (w *Writer) WriteString(s string) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(s) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + // + // yes, this is unsafe. *but* + // io.Writer is not allowed + // to mutate its input or + // maintain a reference to it, + // per the spec in package io. + // + // plus, if the string is really + // too big to fit in the buffer, then + // creating a copy to write it is + // expensive (and, strictly speaking, + // unnecessary) + if c < ln { + return w.w.Write(unsafestr(s)) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], s), nil +} + +// WriteByte implements `io.ByteWriter` +func (w *Writer) WriteByte(b byte) error { + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + w.buf = append(w.buf, b) + return nil +} + +// Next returns the next 'n' free bytes +// in the write buffer, flushing the writer +// as necessary. Next will return `io.ErrShortBuffer` +// if 'n' is greater than the size of the write buffer. +// Calls to 'next' increment the write position by +// the size of the returned buffer. +func (w *Writer) Next(n int) ([]byte, error) { + c, l := cap(w.buf), len(w.buf) + if n > c { + return nil, io.ErrShortBuffer + } + avail := c - l + if avail < n { + if err := w.Flush(); err != nil { + return nil, err + } + l = len(w.buf) + } + w.buf = w.buf[:l+n] + return w.buf[l:], nil +} + +// take the bytes from w.buf[n:len(w.buf)] +// and put them at the beginning of w.buf, +// and resize to the length of the copied segment. +func (w *Writer) pushback(n int) { + w.buf = w.buf[:copy(w.buf, w.buf[n:])] +} + +// ReadFrom implements `io.ReaderFrom` +func (w *Writer) ReadFrom(r io.Reader) (int64, error) { + // anticipatory flush + if err := w.Flush(); err != nil { + return 0, err + } + + w.buf = w.buf[0:cap(w.buf)] // expand buffer + + var nn int64 // written + var err error // error + var x int // read + + // 1:1 reads and writes + for err == nil { + x, err = r.Read(w.buf) + if x > 0 { + n, werr := w.w.Write(w.buf[:x]) + nn += int64(n) + + if err != nil { + if n < x && n > 0 { + w.pushback(n - x) + } + return nn, werr + } + if n < x { + w.pushback(n - x) + return nn, io.ErrShortWrite + } + } else if err == nil { + err = io.ErrNoProgress + break + } + } + if err != io.EOF { + return nn, err + } + + // we only clear here + // because we are sure + // the writes have + // succeeded. otherwise, + // we retain the data in case + // future writes succeed. + w.buf = w.buf[0:0] + + return nn, nil +} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go new file mode 100644 index 000000000000..a978e3b6a0fb --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_appengine.go @@ -0,0 +1,6 @@ +//go:build appengine +// +build appengine + +package fwd + +func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_tinygo.go b/vendor/github.com/philhofer/fwd/writer_tinygo.go new file mode 100644 index 000000000000..c98cd57f3c9a --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_tinygo.go @@ -0,0 +1,13 @@ +//go:build tinygo +// +build tinygo + +package fwd + +import ( + "unsafe" +) + +// unsafe cast string as []byte +func unsafestr(b string) []byte { + return unsafe.Slice(unsafe.StringData(b), len(b)) +} diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go new file mode 100644 index 000000000000..e4cb4a830d10 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go @@ -0,0 +1,20 @@ +//go:build !appengine && !tinygo +// +build !appengine,!tinygo + +package fwd + +import ( + "reflect" + "unsafe" +) + +// unsafe cast string as []byte +func unsafestr(s string) []byte { + var b []byte + sHdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bHdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bHdr.Data = sHdr.Data + bHdr.Len = sHdr.Len + bHdr.Cap = sHdr.Len + return b +} diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml new file mode 100644 index 000000000000..c73bb33baa51 --- /dev/null +++ b/vendor/github.com/rs/xid/.appveyor.yml @@ -0,0 +1,27 @@ +version: 1.0.0.{build} + +platform: x64 + +branches: + only: + - master + +clone_folder: c:\gopath\src\github.com\rs\xid + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -t . + +build_script: + - go build + +test_script: + - go test + diff --git a/vendor/github.com/rs/xid/.gitignore b/vendor/github.com/rs/xid/.gitignore new file mode 100644 index 000000000000..81be9277fc7e --- /dev/null +++ b/vendor/github.com/rs/xid/.gitignore @@ -0,0 +1,3 @@ +/.idea +/.vscode +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/rs/xid/.golangci.yml b/vendor/github.com/rs/xid/.golangci.yml new file mode 100644 index 000000000000..7929600a978e --- /dev/null +++ b/vendor/github.com/rs/xid/.golangci.yml @@ -0,0 +1,5 @@ +run: + tests: false + +output: + sort-results: true diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml new file mode 100644 index 000000000000..b37da1594273 --- /dev/null +++ b/vendor/github.com/rs/xid/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: +- "1.9" +- "1.10" +- "master" +matrix: + allow_failures: + - go: "master" diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE new file mode 100644 index 000000000000..47c5e9d2d2f7 --- /dev/null +++ b/vendor/github.com/rs/xid/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md new file mode 100644 index 000000000000..1bf45bd11b34 --- /dev/null +++ b/vendor/github.com/rs/xid/README.md @@ -0,0 +1,121 @@ +# Globally Unique ID Generator + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid) + +Package xid is a globally unique id generator library, ready to safely be used directly in your server code. + +Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization ([base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10)) to make it shorter when transported as a string: +https://docs.mongodb.org/manual/reference/object-id/ + +- 4-byte value representing the seconds since the Unix epoch, +- 3-byte machine identifier, +- 2-byte process id, and +- 3-byte counter, starting with a random value. + +The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +The string representation is using [base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10) (w/o padding) for better space efficiency +when stored in that form (20 bytes). The hex variant of base32 is used to retain the +sortable property of the id. + +Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +issue when transported as a string between various systems. Base36 wasn't retained either +because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). + +UUIDs are 16 bytes (128 bits) and 36 chars as string representation. Twitter Snowflake +ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central +generator servers. xid stands in between with 12 bytes (96 bits) and a more compact +URL-safe string representation (20 chars). No configuration or central generator server +is required so it can be used directly in server's code. + +| Name | Binary Size | String Size | Features +|-------------|-------------|----------------|---------------- +| [UUID] | 16 bytes | 36 chars | configuration free, not sortable +| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable +| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable +| [MongoID] | 12 bytes | 24 chars | configuration free, sortable +| xid | 12 bytes | 20 chars | configuration free, sortable + +[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier +[shortuuid]: https://github.com/stochastic-technologies/shortuuid +[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake +[MongoID]: https://docs.mongodb.org/manual/reference/object-id/ + +Features: + +- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +- Base32 hex encoded by default (20 chars when transported as printable string, still sortable) +- Non configured, you don't need set a unique machine and/or data center id +- K-ordered +- Embedded time with 1 second precision +- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +- Lock-free (i.e.: unlike UUIDv1 and v2) + +Best used with [zerolog](https://github.com/rs/zerolog)'s +[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler). + +Notes: + +- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure. You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator. + +References: + +- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +- https://en.wikipedia.org/wiki/Universally_unique_identifier +- https://blog.twitter.com/2010/announcing-snowflake +- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid +- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride +- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid +- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid +- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid +- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid +- PostgreSQL port by [Rasmus Holm](https://github.com/crholm): https://github.com/modfin/pg-xid +- Swift port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/swift-xid +- C++ port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/libxid +- Typescript & Javascript port by [Yiwen AI](https://github.com/yiwen-ai): https://github.com/yiwen-ai/xid-ts +- Gleam port by [Alexandre Del Vecchio](https://github.com/defgenx): https://github.com/defgenx/gxid + +## Install + + go get github.com/rs/xid + +## Usage + +```go +guid := xid.New() + +println(guid.String()) +// Output: 9m4e2mr0ui3e8a215n4g +``` + +Get `xid` embedded info: + +```go +guid.Machine() +guid.Pid() +guid.Time() +guid.Counter() +``` + +## Benchmark + +Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid). + +``` +BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op +BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op +BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op +BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op +``` + +Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs. + +## Licenses + +All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE). diff --git a/vendor/github.com/rs/xid/error.go b/vendor/github.com/rs/xid/error.go new file mode 100644 index 000000000000..ea2537493bef --- /dev/null +++ b/vendor/github.com/rs/xid/error.go @@ -0,0 +1,11 @@ +package xid + +const ( + // ErrInvalidID is returned when trying to unmarshal an invalid ID. + ErrInvalidID strErr = "xid: invalid ID" +) + +// strErr allows declaring errors as constants. +type strErr string + +func (err strErr) Error() string { return string(err) } diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go new file mode 100644 index 000000000000..17351563a814 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_darwin.go @@ -0,0 +1,34 @@ +// +build darwin + +package xid + +import ( + "errors" + "os/exec" + "strings" +) + +func readPlatformMachineID() (string, error) { + ioreg, err := exec.LookPath("ioreg") + if err != nil { + return "", err + } + + cmd := exec.Command(ioreg, "-rd1", "-c", "IOPlatformExpertDevice") + out, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + + for _, line := range strings.Split(string(out), "\n") { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.SplitAfter(line, `" = "`) + if len(parts) == 2 { + uuid := strings.TrimRight(parts[1], `"`) + return strings.ToLower(uuid), nil + } + } + } + + return "", errors.New("cannot find host id") +} diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go new file mode 100644 index 000000000000..7fbd3c004d17 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_fallback.go @@ -0,0 +1,9 @@ +// +build !darwin,!linux,!freebsd,!windows + +package xid + +import "errors" + +func readPlatformMachineID() (string, error) { + return "", errors.New("not implemented") +} diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go new file mode 100644 index 000000000000..be25a039e94d --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_freebsd.go @@ -0,0 +1,9 @@ +// +build freebsd + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.hostuuid") +} diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go new file mode 100644 index 000000000000..837b20436c53 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_linux.go @@ -0,0 +1,13 @@ +// +build linux + +package xid + +import "io/ioutil" + +func readPlatformMachineID() (string, error) { + b, err := ioutil.ReadFile("/etc/machine-id") + if err != nil || len(b) == 0 { + b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid") + } + return string(b), err +} diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go new file mode 100644 index 000000000000..a4d98ab0e7b8 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_windows.go @@ -0,0 +1,50 @@ +// +build windows + +package xid + +import ( + "fmt" + "syscall" + "unsafe" +) + +func readPlatformMachineID() (string, error) { + // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go + var h syscall.Handle + + regKeyCryptoPtr, err := syscall.UTF16PtrFromString(`SOFTWARE\Microsoft\Cryptography`) + if err != nil { + return "", fmt.Errorf(`error reading registry key "SOFTWARE\Microsoft\Cryptography": %w`, err) + } + + err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, regKeyCryptoPtr, 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer func() { _ = syscall.RegCloseKey(h) }() + + const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [syscallRegBufLen]uint16 + bufLen := uint32(syscallRegBufLen) + var valType uint32 + + mGuidPtr, err := syscall.UTF16PtrFromString(`MachineGuid`) + if err != nil { + return "", fmt.Errorf("error reading machine GUID: %w", err) + } + + err = syscall.RegQueryValueEx(h, mGuidPtr, nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", fmt.Errorf("error parsing ") + } + + hostID := syscall.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return hostID, nil +} diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go new file mode 100644 index 000000000000..e88984d9f1f8 --- /dev/null +++ b/vendor/github.com/rs/xid/id.go @@ -0,0 +1,390 @@ +// Package xid is a globally unique id generator suited for web scale +// +// Xid is using Mongo Object ID algorithm to generate globally unique ids: +// https://docs.mongodb.org/manual/reference/object-id/ +// +// - 4-byte value representing the seconds since the Unix epoch, +// - 3-byte machine identifier, +// - 2-byte process id, and +// - 3-byte counter, starting with a random value. +// +// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +// The string representation is using base32 hex (w/o padding) for better space efficiency +// when stored in that form (20 bytes). The hex variant of base32 is used to retain the +// sortable property of the id. +// +// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +// issue when transported as a string between various systems. Base36 wasn't retained either +// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). +// +// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between +// with 12 bytes with a more compact string representation ready for the web and no +// required configuration or central generation server. +// +// Features: +// +// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +// - Base32 hex encoded by default (16 bytes storage when transported as printable string) +// - Non configured, you don't need set a unique machine and/or data center id +// - K-ordered +// - Embedded time with 1 second precision +// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +// +// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). +// +// References: +// +// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +// - https://en.wikipedia.org/wiki/Universally_unique_identifier +// - https://blog.twitter.com/2010/announcing-snowflake +package xid + +import ( + "bytes" + "crypto/sha256" + "crypto/rand" + "database/sql/driver" + "encoding/binary" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "sort" + "sync/atomic" + "time" +) + +// Code inspired from mgo/bson ObjectId + +// ID represents a unique request id +type ID [rawLen]byte + +const ( + encodedLen = 20 // string encoded len + rawLen = 12 // binary raw len + + // encoding stores a custom version of the base32 encoding with lower case + // letters. + encoding = "0123456789abcdefghijklmnopqrstuv" +) + +var ( + // objectIDCounter is atomically incremented when generating a new ObjectId. It's + // used as the counter part of an id. This id is initialized with a random value. + objectIDCounter = randInt() + + // machineID is generated once and used in subsequent calls to the New* functions. + machineID = readMachineID() + + // pid stores the current process id + pid = os.Getpid() + + nilID ID + + // dec is the decoding map for base32 encoding + dec [256]byte +) + +func init() { + for i := 0; i < len(dec); i++ { + dec[i] = 0xFF + } + for i := 0; i < len(encoding); i++ { + dec[encoding[i]] = byte(i) + } + + // If /proc/self/cpuset exists and is not /, we can assume that we are in a + // form of container and use the content of cpuset xor-ed with the PID in + // order get a reasonable machine global unique PID. + b, err := ioutil.ReadFile("/proc/self/cpuset") + if err == nil && len(b) > 1 { + pid ^= int(crc32.ChecksumIEEE(b)) + } +} + +// readMachineID generates a machine ID, derived from a platform-specific machine ID +// value, or else the machine's hostname, or else a randomly-generated number. +// It panics if all of these methods fail. +func readMachineID() []byte { + id := make([]byte, 3) + hid, err := readPlatformMachineID() + if err != nil || len(hid) == 0 { + hid, err = os.Hostname() + } + if err == nil && len(hid) != 0 { + hw := sha256.New() + hw.Write([]byte(hid)) + copy(id, hw.Sum(nil)) + } else { + // Fallback to rand number if machine id can't be gathered + if _, randErr := rand.Reader.Read(id); randErr != nil { + panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) + } + } + return id +} + +// randInt generates a random uint32 +func randInt() uint32 { + b := make([]byte, 3) + if _, err := rand.Reader.Read(b); err != nil { + panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) + } + return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) +} + +// New generates a globally unique ID +func New() ID { + return NewWithTime(time.Now()) +} + +// NewWithTime generates a globally unique ID with the passed in time +func NewWithTime(t time.Time) ID { + var id ID + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) + // Machine ID, 3 bytes + id[4] = machineID[0] + id[5] = machineID[1] + id[6] = machineID[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + id[7] = byte(pid >> 8) + id[8] = byte(pid) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIDCounter, 1) + id[9] = byte(i >> 16) + id[10] = byte(i >> 8) + id[11] = byte(i) + return id +} + +// FromString reads an ID from its string representation +func FromString(id string) (ID, error) { + i := &ID{} + err := i.UnmarshalText([]byte(id)) + return *i, err +} + +// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). +func (id ID) String() string { + text := make([]byte, encodedLen) + encode(text, id[:]) + return string(text) +} + +// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it. +func (id ID) Encode(dst []byte) []byte { + encode(dst, id[:]) + return dst +} + +// MarshalText implements encoding/text TextMarshaler interface +func (id ID) MarshalText() ([]byte, error) { + text := make([]byte, encodedLen) + encode(text, id[:]) + return text, nil +} + +// MarshalJSON implements encoding/json Marshaler interface +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsNil() { + return []byte("null"), nil + } + text := make([]byte, encodedLen+2) + encode(text[1:encodedLen+1], id[:]) + text[0], text[encodedLen+1] = '"', '"' + return text, nil +} + +// encode by unrolling the stdlib base32 algorithm + removing all safe checks +func encode(dst, id []byte) { + _ = dst[19] + _ = id[11] + + dst[19] = encoding[(id[11]<<4)&0x1F] + dst[18] = encoding[(id[11]>>1)&0x1F] + dst[17] = encoding[(id[11]>>6)|(id[10]<<2)&0x1F] + dst[16] = encoding[id[10]>>3] + dst[15] = encoding[id[9]&0x1F] + dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] + dst[13] = encoding[(id[8]>>2)&0x1F] + dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] + dst[11] = encoding[(id[7]>>4)|(id[6]<<4)&0x1F] + dst[10] = encoding[(id[6]>>1)&0x1F] + dst[9] = encoding[(id[6]>>6)|(id[5]<<2)&0x1F] + dst[8] = encoding[id[5]>>3] + dst[7] = encoding[id[4]&0x1F] + dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] + dst[5] = encoding[(id[3]>>2)&0x1F] + dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] + dst[3] = encoding[(id[2]>>4)|(id[1]<<4)&0x1F] + dst[2] = encoding[(id[1]>>1)&0x1F] + dst[1] = encoding[(id[1]>>6)|(id[0]<<2)&0x1F] + dst[0] = encoding[id[0]>>3] +} + +// UnmarshalText implements encoding/text TextUnmarshaler interface +func (id *ID) UnmarshalText(text []byte) error { + if len(text) != encodedLen { + return ErrInvalidID + } + for _, c := range text { + if dec[c] == 0xFF { + return ErrInvalidID + } + } + if !decode(id, text) { + *id = nilID + return ErrInvalidID + } + return nil +} + +// UnmarshalJSON implements encoding/json Unmarshaler interface +func (id *ID) UnmarshalJSON(b []byte) error { + s := string(b) + if s == "null" { + *id = nilID + return nil + } + // Check the slice length to prevent panic on passing it to UnmarshalText() + if len(b) < 2 { + return ErrInvalidID + } + return id.UnmarshalText(b[1 : len(b)-1]) +} + +// decode by unrolling the stdlib base32 algorithm + customized safe check. +func decode(id *ID, src []byte) bool { + _ = src[19] + _ = id[11] + + id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4 + // check the last byte + if encoding[(id[11]<<4)&0x1F] != src[19] { + return false + } + id[10] = dec[src[16]]<<3 | dec[src[17]]>>2 + id[9] = dec[src[14]]<<5 | dec[src[15]] + id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3 + id[7] = dec[src[11]]<<4 | dec[src[12]]>>1 + id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4 + id[5] = dec[src[8]]<<3 | dec[src[9]]>>2 + id[4] = dec[src[6]]<<5 | dec[src[7]] + id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3 + id[2] = dec[src[3]]<<4 | dec[src[4]]>>1 + id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4 + id[0] = dec[src[0]]<<3 | dec[src[1]]>>2 + return true +} + +// Time returns the timestamp part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Time() time.Time { + // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. + secs := int64(binary.BigEndian.Uint32(id[0:4])) + return time.Unix(secs, 0) +} + +// Machine returns the 3-byte machine id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Machine() []byte { + return id[4:7] +} + +// Pid returns the process id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Pid() uint16 { + return binary.BigEndian.Uint16(id[7:9]) +} + +// Counter returns the incrementing value part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Counter() int32 { + b := id[9:12] + // Counter is stored as big-endian 3-byte value + return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) +} + +// Value implements the driver.Valuer interface. +func (id ID) Value() (driver.Value, error) { + if id.IsNil() { + return nil, nil + } + b, err := id.MarshalText() + return string(b), err +} + +// Scan implements the sql.Scanner interface. +func (id *ID) Scan(value interface{}) (err error) { + switch val := value.(type) { + case string: + return id.UnmarshalText([]byte(val)) + case []byte: + return id.UnmarshalText(val) + case nil: + *id = nilID + return nil + default: + return fmt.Errorf("xid: scanning unsupported type: %T", value) + } +} + +// IsNil Returns true if this is a "nil" ID +func (id ID) IsNil() bool { + return id == nilID +} + +// Alias of IsNil +func (id ID) IsZero() bool { + return id.IsNil() +} + +// NilID returns a zero value for `xid.ID`. +func NilID() ID { + return nilID +} + +// Bytes returns the byte array representation of `ID` +func (id ID) Bytes() []byte { + return id[:] +} + +// FromBytes convert the byte array representation of `ID` back to `ID` +func FromBytes(b []byte) (ID, error) { + var id ID + if len(b) != rawLen { + return id, ErrInvalidID + } + copy(id[:], b) + return id, nil +} + +// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`. +// The result will be 0 if two IDs are identical, -1 if current id is less than the other one, +// and 1 if current id is greater than the other. +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) +} + +type sorter []ID + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Less(i, j int) bool { + return s[i].Compare(s[j]) < 0 +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Sort sorts an array of IDs inplace. +// It works by wrapping `[]ID` and use `sort.Sort`. +func Sort(ids []ID) { + sort.Sort(sorter(ids)) +} diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE new file mode 100644 index 000000000000..14d60424e88f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go new file mode 100644 index 000000000000..d2a66857be87 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go @@ -0,0 +1,25 @@ +//go:build linux && !appengine && !tinygo +// +build linux,!appengine,!tinygo + +package msgp + +import ( + "os" + "syscall" +) + +func adviseRead(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) +} + +func adviseWrite(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) +} + +func fallocate(f *os.File, sz int64) error { + err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) + if err == syscall.ENOTSUP { + return f.Truncate(sz) + } + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go new file mode 100644 index 000000000000..07f524af7ff5 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go @@ -0,0 +1,18 @@ +//go:build (!linux && !tinygo && !windows) || appengine +// +build !linux,!tinygo,!windows appengine + +package msgp + +import ( + "os" +) + +// TODO: darwin, BSD support + +func adviseRead(mem []byte) {} + +func adviseWrite(mem []byte) {} + +func fallocate(f *os.File, sz int64) error { + return f.Truncate(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go new file mode 100644 index 000000000000..6e6afd868782 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/circular.go @@ -0,0 +1,45 @@ +package msgp + +type timer interface { + StartTimer() + StopTimer() +} + +// EndlessReader is an io.Reader +// that loops over the same data +// endlessly. It is used for benchmarking. +type EndlessReader struct { + tb timer + data []byte + offset int +} + +// NewEndlessReader returns a new endless reader. +// Buffer b cannot be empty +func NewEndlessReader(b []byte, tb timer) *EndlessReader { + if len(b) == 0 { + panic("EndlessReader cannot be of zero length") + } + // Double until we reach 4K. + for len(b) < 4<<10 { + b = append(b, b...) + } + return &EndlessReader{tb: tb, data: b, offset: 0} +} + +// Read implements io.Reader. In practice, it +// always returns (len(p), nil), although it +// fills the supplied slice while the benchmark +// timer is stopped. +func (c *EndlessReader) Read(p []byte) (int, error) { + var n int + l := len(p) + m := len(c.data) + nn := copy(p[n:], c.data[c.offset:]) + n += nn + for n < l { + n += copy(p[n:], c.data[:]) + } + c.offset = (c.offset + l) % m + return n, nil +} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go new file mode 100644 index 000000000000..47a8c1834504 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/defs.go @@ -0,0 +1,151 @@ +// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). +// +// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack +// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code +// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. +// +// This package defines four "families" of functions: +// - AppendXxxx() appends an object to a []byte in MessagePack encoding. +// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. +// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. +// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. +// +// Once a type has satisfied the `Encodable` and `Decodable` interfaces, +// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using +// +// msgp.Encode(io.Writer, msgp.Encodable) +// +// and +// +// msgp.Decode(io.Reader, msgp.Decodable) +// +// There are also methods for converting MessagePack to JSON without +// an explicit de-serialization step. +// +// For additional tips, tricks, and gotchas, please visit +// the wiki at http://github.com/tinylib/msgp +package msgp + +const ( + last4 = 0x0f + first4 = 0xf0 + last5 = 0x1f + first3 = 0xe0 + last7 = 0x7f + + // recursionLimit is the limit of recursive calls. + // This limits the call depth of dynamic code, like Skip and interface conversions. + recursionLimit = 100000 +) + +func isfixint(b byte) bool { + return b>>7 == 0 +} + +func isnfixint(b byte) bool { + return b&first3 == mnfixint +} + +func isfixmap(b byte) bool { + return b&first4 == mfixmap +} + +func isfixarray(b byte) bool { + return b&first4 == mfixarray +} + +func isfixstr(b byte) bool { + return b&first3 == mfixstr +} + +func wfixint(u uint8) byte { + return u & last7 +} + +func rfixint(b byte) uint8 { + return b +} + +func wnfixint(i int8) byte { + return byte(i) | mnfixint +} + +func rnfixint(b byte) int8 { + return int8(b) +} + +func rfixmap(b byte) uint8 { + return b & last4 +} + +func wfixmap(u uint8) byte { + return mfixmap | (u & last4) +} + +func rfixstr(b byte) uint8 { + return b & last5 +} + +func wfixstr(u uint8) byte { + return (u & last5) | mfixstr +} + +func rfixarray(b byte) uint8 { + return (b & last4) +} + +func wfixarray(u uint8) byte { + return (u & last4) | mfixarray +} + +// These are all the byte +// prefixes defined by the +// msgpack standard +const ( + // 0XXXXXXX + mfixint uint8 = 0x00 + + // 111XXXXX + mnfixint uint8 = 0xe0 + + // 1000XXXX + mfixmap uint8 = 0x80 + + // 1001XXXX + mfixarray uint8 = 0x90 + + // 101XXXXX + mfixstr uint8 = 0xa0 + + mnil uint8 = 0xc0 + mfalse uint8 = 0xc2 + mtrue uint8 = 0xc3 + mbin8 uint8 = 0xc4 + mbin16 uint8 = 0xc5 + mbin32 uint8 = 0xc6 + mext8 uint8 = 0xc7 + mext16 uint8 = 0xc8 + mext32 uint8 = 0xc9 + mfloat32 uint8 = 0xca + mfloat64 uint8 = 0xcb + muint8 uint8 = 0xcc + muint16 uint8 = 0xcd + muint32 uint8 = 0xce + muint64 uint8 = 0xcf + mint8 uint8 = 0xd0 + mint16 uint8 = 0xd1 + mint32 uint8 = 0xd2 + mint64 uint8 = 0xd3 + mfixext1 uint8 = 0xd4 + mfixext2 uint8 = 0xd5 + mfixext4 uint8 = 0xd6 + mfixext8 uint8 = 0xd7 + mfixext16 uint8 = 0xd8 + mstr8 uint8 = 0xd9 + mstr16 uint8 = 0xda + mstr32 uint8 = 0xdb + marray16 uint8 = 0xdc + marray32 uint8 = 0xdd + mmap16 uint8 = 0xde + mmap32 uint8 = 0xdf +) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go new file mode 100644 index 000000000000..b473a6f66861 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/edit.go @@ -0,0 +1,242 @@ +package msgp + +import ( + "math" +) + +// Locate returns a []byte pointing to the field +// in a messagepack map with the provided key. (The returned []byte +// points to a sub-slice of 'raw'; Locate does no allocations.) If the +// key doesn't exist in the map, a zero-length []byte will be returned. +func Locate(key string, raw []byte) []byte { + s, n := locate(raw, key) + return raw[s:n] +} + +// Replace takes a key ("key") in a messagepack map ("raw") +// and replaces its value with the one provided and returns +// the new []byte. The returned []byte may point to the same +// memory as "raw". Replace makes no effort to evaluate the validity +// of the contents of 'val'. It may use up to the full capacity of 'raw.' +// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' +// is not a map. +func Replace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, true) +} + +// CopyReplace works similarly to Replace except that the returned +// byte slice does not point to the same memory as 'raw'. CopyReplace +// returns 'nil' if the field doesn't exist or 'raw' isn't a map. +func CopyReplace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, false) +} + +// Remove removes a key-value pair from 'raw'. It returns +// 'raw' unchanged if the key didn't exist. +func Remove(key string, raw []byte) []byte { + start, end := locateKV(raw, key) + if start == end { + return raw + } + raw = raw[:start+copy(raw[start:], raw[end:])] + return resizeMap(raw, -1) +} + +// HasKey returns whether the map in 'raw' has +// a field with key 'key' +func HasKey(key string, raw []byte) bool { + sz, bts, err := ReadMapHeaderBytes(raw) + if err != nil { + return false + } + var field []byte + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return false + } + if UnsafeString(field) == key { + return true + } + } + return false +} + +func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { + ll := end - start // length of segment to replace + lv := len(val) + + if inplace { + extra := lv - ll + + // fastest case: we're doing + // a 1:1 replacement + if extra == 0 { + copy(raw[start:], val) + return raw + + } else if extra < 0 { + // 'val' smaller than replaced value + // copy in place and shift back + + x := copy(raw[start:], val) + y := copy(raw[start+x:], raw[end:]) + return raw[:start+x+y] + + } else if extra < cap(raw)-len(raw) { + // 'val' less than (cap-len) extra bytes + // copy in place and shift forward + raw = raw[0 : len(raw)+extra] + // shift end forward + copy(raw[end+extra:], raw[end:]) + copy(raw[start:], val) + return raw + } + } + + // we have to allocate new space + out := make([]byte, len(raw)+len(val)-ll) + x := copy(out, raw[:start]) + y := copy(out[x:], val) + copy(out[x+y:], raw[end:]) + return out +} + +// locate does a naive O(n) search for the map key; returns start, end +// (returns 0,0 on error) +func locate(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return + } + + // loop and locate field + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + // start location + l := len(raw) + start = l - len(bts) + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = l - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// locate key AND value +func locateKV(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return 0, 0 + } + + for i := uint32(0); i < sz; i++ { + tmp := len(bts) + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + start = len(raw) - tmp + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = len(raw) - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// delta is delta on map size +func resizeMap(raw []byte, delta int64) []byte { + var sz int64 + switch raw[0] { + case mmap16: + sz = int64(big.Uint16(raw[1:])) + if sz+delta <= math.MaxUint16 { + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[5:], raw[3:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[3:]...) + + case mmap32: + sz = int64(big.Uint32(raw[1:])) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + + default: + sz = int64(rfixmap(raw[0])) + if sz+delta < 16 { + raw[0] = wfixmap(uint8(sz + delta)) + return raw + } else if sz+delta <= math.MaxUint16 { + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[3:], raw[1:]) + raw[0] = mmap16 + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } + if cap(raw)-len(raw) >= 4 { + raw = raw[0 : len(raw)+4] + copy(raw[5:], raw[1:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go new file mode 100644 index 000000000000..a05b0b21c280 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go @@ -0,0 +1,128 @@ +package msgp + +func calcBytespec(v byte) bytespec { + // single byte values + switch v { + + case mnil: + return bytespec{size: 1, extra: constsize, typ: NilType} + case mfalse: + return bytespec{size: 1, extra: constsize, typ: BoolType} + case mtrue: + return bytespec{size: 1, extra: constsize, typ: BoolType} + case mbin8: + return bytespec{size: 2, extra: extra8, typ: BinType} + case mbin16: + return bytespec{size: 3, extra: extra16, typ: BinType} + case mbin32: + return bytespec{size: 5, extra: extra32, typ: BinType} + case mext8: + return bytespec{size: 3, extra: extra8, typ: ExtensionType} + case mext16: + return bytespec{size: 4, extra: extra16, typ: ExtensionType} + case mext32: + return bytespec{size: 6, extra: extra32, typ: ExtensionType} + case mfloat32: + return bytespec{size: 5, extra: constsize, typ: Float32Type} + case mfloat64: + return bytespec{size: 9, extra: constsize, typ: Float64Type} + case muint8: + return bytespec{size: 2, extra: constsize, typ: UintType} + case muint16: + return bytespec{size: 3, extra: constsize, typ: UintType} + case muint32: + return bytespec{size: 5, extra: constsize, typ: UintType} + case muint64: + return bytespec{size: 9, extra: constsize, typ: UintType} + case mint8: + return bytespec{size: 2, extra: constsize, typ: IntType} + case mint16: + return bytespec{size: 3, extra: constsize, typ: IntType} + case mint32: + return bytespec{size: 5, extra: constsize, typ: IntType} + case mint64: + return bytespec{size: 9, extra: constsize, typ: IntType} + case mfixext1: + return bytespec{size: 3, extra: constsize, typ: ExtensionType} + case mfixext2: + return bytespec{size: 4, extra: constsize, typ: ExtensionType} + case mfixext4: + return bytespec{size: 6, extra: constsize, typ: ExtensionType} + case mfixext8: + return bytespec{size: 10, extra: constsize, typ: ExtensionType} + case mfixext16: + return bytespec{size: 18, extra: constsize, typ: ExtensionType} + case mstr8: + return bytespec{size: 2, extra: extra8, typ: StrType} + case mstr16: + return bytespec{size: 3, extra: extra16, typ: StrType} + case mstr32: + return bytespec{size: 5, extra: extra32, typ: StrType} + case marray16: + return bytespec{size: 3, extra: array16v, typ: ArrayType} + case marray32: + return bytespec{size: 5, extra: array32v, typ: ArrayType} + case mmap16: + return bytespec{size: 3, extra: map16v, typ: MapType} + case mmap32: + return bytespec{size: 5, extra: map32v, typ: MapType} + } + + switch { + + // fixint + case v >= mfixint && v < 0x80: + return bytespec{size: 1, extra: constsize, typ: IntType} + + // fixstr gets constsize, since the prefix yields the size + case v >= mfixstr && v < 0xc0: + return bytespec{size: 1 + rfixstr(v), extra: constsize, typ: StrType} + + // fixmap + case v >= mfixmap && v < 0x90: + return bytespec{size: 1, extra: varmode(2 * rfixmap(v)), typ: MapType} + + // fixarray + case v >= mfixarray && v < 0xa0: + return bytespec{size: 1, extra: varmode(rfixarray(v)), typ: ArrayType} + + // nfixint + case v >= mnfixint && uint16(v) < 0x100: + return bytespec{size: 1, extra: constsize, typ: IntType} + + } + + // 0xC1 is unused per the spec and falls through to here, + // everything else is covered above + + return bytespec{} +} + +func getType(v byte) Type { + return getBytespec(v).typ +} + +// a valid bytespsec has +// non-zero 'size' and +// non-zero 'typ' +type bytespec struct { + size uint8 // prefix size information + extra varmode // extra size information + typ Type // type + _ byte // makes bytespec 4 bytes (yes, this matters) +} + +// size mode +// if positive, # elements for composites +type varmode int8 + +const ( + constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) + extra8 varmode = -1 // has uint8(p[1]) extra bytes + extra16 varmode = -2 // has be16(p[1:]) extra bytes + extra32 varmode = -3 // has be32(p[1:]) extra bytes + map16v varmode = -4 // use map16 + map32v varmode = -5 // use map32 + array16v varmode = -6 // use array16 + array32v varmode = -7 // use array32 +) diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize_default.go b/vendor/github.com/tinylib/msgp/msgp/elsize_default.go new file mode 100644 index 000000000000..e7e8b547a935 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize_default.go @@ -0,0 +1,21 @@ +//go:build !tinygo +// +build !tinygo + +package msgp + +// size of every object on the wire, +// plus type information. gives us +// constant-time type information +// for traversing composite objects. +var sizes [256]bytespec + +func init() { + for i := 0; i < 256; i++ { + sizes[i] = calcBytespec(byte(i)) + } +} + +// getBytespec gets inlined to a simple array index +func getBytespec(v byte) bytespec { + return sizes[v] +} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go b/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go new file mode 100644 index 000000000000..041f4ad694b1 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize_tinygo.go @@ -0,0 +1,13 @@ +//go:build tinygo +// +build tinygo + +package msgp + +// for tinygo, getBytespec just calls calcBytespec +// a simple/slow function with a switch statement - +// doesn't require any heap alloc, moves the space +// requirements into code instad of ram + +func getBytespec(v byte) bytespec { + return calcBytespec(v) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go new file mode 100644 index 000000000000..e6b42b689378 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors.go @@ -0,0 +1,393 @@ +package msgp + +import ( + "reflect" + "strconv" +) + +const resumableDefault = false + +var ( + // ErrShortBytes is returned when the + // slice being decoded is too short to + // contain the contents of the message + ErrShortBytes error = errShort{} + + // ErrRecursion is returned when the maximum recursion limit is reached for an operation. + // This should only realistically be seen on adversarial data trying to exhaust the stack. + ErrRecursion error = errRecursion{} + + // this error is only returned + // if we reach code that should + // be unreachable + fatal error = errFatal{} +) + +// Error is the interface satisfied +// by all of the errors that originate +// from this package. +type Error interface { + error + + // Resumable returns whether + // or not the error means that + // the stream of data is malformed + // and the information is unrecoverable. + Resumable() bool +} + +// contextError allows msgp Error instances to be enhanced with additional +// context about their origin. +type contextError interface { + Error + + // withContext must not modify the error instance - it must clone and + // return a new error with the context added. + withContext(ctx string) error +} + +// Cause returns the underlying cause of an error that has been wrapped +// with additional context. +func Cause(e error) error { + out := e + if e, ok := e.(errWrapped); ok && e.cause != nil { + out = e.cause + } + return out +} + +// Resumable returns whether or not the error means that the stream of data is +// malformed and the information is unrecoverable. +func Resumable(e error) bool { + if e, ok := e.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +// WrapError wraps an error with additional context that allows the part of the +// serialized type that caused the problem to be identified. Underlying errors +// can be retrieved using Cause() +// +// The input error is not modified - a new error should be returned. +// +// ErrShortBytes is not wrapped with any context due to backward compatibility +// issues with the public API. +func WrapError(err error, ctx ...interface{}) error { + switch e := err.(type) { + case errShort: + return e + case contextError: + return e.withContext(ctxString(ctx)) + default: + return errWrapped{cause: err, ctx: ctxString(ctx)} + } +} + +func addCtx(ctx, add string) string { + if ctx != "" { + return add + "/" + ctx + } else { + return add + } +} + +// errWrapped allows arbitrary errors passed to WrapError to be enhanced with +// context and unwrapped with Cause() +type errWrapped struct { + cause error + ctx string +} + +func (e errWrapped) Error() string { + if e.ctx != "" { + return e.cause.Error() + " at " + e.ctx + } else { + return e.cause.Error() + } +} + +func (e errWrapped) Resumable() bool { + if e, ok := e.cause.(Error); ok { + return e.Resumable() + } + return resumableDefault +} + +// Unwrap returns the cause. +func (e errWrapped) Unwrap() error { return e.cause } + +type errShort struct{} + +func (e errShort) Error() string { return "msgp: too few bytes left to read object" } +func (e errShort) Resumable() bool { return false } + +type errFatal struct { + ctx string +} + +func (f errFatal) Error() string { + out := "msgp: fatal decoding error (unreachable code)" + if f.ctx != "" { + out += " at " + f.ctx + } + return out +} + +func (f errFatal) Resumable() bool { return false } + +func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f } + +type errRecursion struct{} + +func (e errRecursion) Error() string { return "msgp: recursion limit reached" } +func (e errRecursion) Resumable() bool { return false } + +// ArrayError is an error returned +// when decoding a fix-sized array +// of the wrong size +type ArrayError struct { + Wanted uint32 + Got uint32 + ctx string +} + +// Error implements the error interface +func (a ArrayError) Error() string { + out := "msgp: wanted array of size " + strconv.Itoa(int(a.Wanted)) + "; got " + strconv.Itoa(int(a.Got)) + if a.ctx != "" { + out += " at " + a.ctx + } + return out +} + +// Resumable is always 'true' for ArrayErrors +func (a ArrayError) Resumable() bool { return true } + +func (a ArrayError) withContext(ctx string) error { a.ctx = addCtx(a.ctx, ctx); return a } + +// IntOverflow is returned when a call +// would downcast an integer to a type +// with too few bits to hold its value. +type IntOverflow struct { + Value int64 // the value of the integer + FailedBitsize int // the bit size that the int64 could not fit into + ctx string +} + +// Error implements the error interface +func (i IntOverflow) Error() string { + str := "msgp: " + strconv.FormatInt(i.Value, 10) + " overflows int" + strconv.Itoa(i.FailedBitsize) + if i.ctx != "" { + str += " at " + i.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (i IntOverflow) Resumable() bool { return true } + +func (i IntOverflow) withContext(ctx string) error { i.ctx = addCtx(i.ctx, ctx); return i } + +// UintOverflow is returned when a call +// would downcast an unsigned integer to a type +// with too few bits to hold its value +type UintOverflow struct { + Value uint64 // value of the uint + FailedBitsize int // the bit size that couldn't fit the value + ctx string +} + +// Error implements the error interface +func (u UintOverflow) Error() string { + str := "msgp: " + strconv.FormatUint(u.Value, 10) + " overflows uint" + strconv.Itoa(u.FailedBitsize) + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintOverflow) Resumable() bool { return true } + +func (u UintOverflow) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u } + +// InvalidTimestamp is returned when an invalid timestamp is encountered +type InvalidTimestamp struct { + Nanos int64 // value of the nano, if invalid + FieldLength int // Unexpected field length. + ctx string +} + +// Error implements the error interface +func (u InvalidTimestamp) Error() (str string) { + if u.Nanos > 0 { + str = "msgp: timestamp nanosecond field value " + strconv.FormatInt(u.Nanos, 10) + " exceeds maximum allows of 999999999" + } else if u.FieldLength >= 0 { + str = "msgp: invalid timestamp field length " + strconv.FormatInt(int64(u.FieldLength), 10) + " - must be 4, 8 or 12" + } + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u InvalidTimestamp) Resumable() bool { return true } + +func (u InvalidTimestamp) withContext(ctx string) error { u.ctx = addCtx(u.ctx, ctx); return u } + +// UintBelowZero is returned when a call +// would cast a signed integer below zero +// to an unsigned integer. +type UintBelowZero struct { + Value int64 // value of the incoming int + ctx string +} + +// Error implements the error interface +func (u UintBelowZero) Error() string { + str := "msgp: attempted to cast int " + strconv.FormatInt(u.Value, 10) + " to unsigned" + if u.ctx != "" { + str += " at " + u.ctx + } + return str +} + +// Resumable is always 'true' for overflows +func (u UintBelowZero) Resumable() bool { return true } + +func (u UintBelowZero) withContext(ctx string) error { + u.ctx = ctx + return u +} + +// A TypeError is returned when a particular +// decoding method is unsuitable for decoding +// a particular MessagePack value. +type TypeError struct { + Method Type // Type expected by method + Encoded Type // Type actually encoded + + ctx string +} + +// Error implements the error interface +func (t TypeError) Error() string { + out := "msgp: attempted to decode type " + quoteStr(t.Encoded.String()) + " with method for " + quoteStr(t.Method.String()) + if t.ctx != "" { + out += " at " + t.ctx + } + return out +} + +// Resumable returns 'true' for TypeErrors +func (t TypeError) Resumable() bool { return true } + +func (t TypeError) withContext(ctx string) error { t.ctx = addCtx(t.ctx, ctx); return t } + +// returns either InvalidPrefixError or +// TypeError depending on whether or not +// the prefix is recognized +func badPrefix(want Type, lead byte) error { + t := getType(lead) + if t == InvalidType { + return InvalidPrefixError(lead) + } + return TypeError{Method: want, Encoded: t} +} + +// InvalidPrefixError is returned when a bad encoding +// uses a prefix that is not recognized in the MessagePack standard. +// This kind of error is unrecoverable. +type InvalidPrefixError byte + +// Error implements the error interface +func (i InvalidPrefixError) Error() string { + return "msgp: unrecognized type prefix 0x" + strconv.FormatInt(int64(i), 16) +} + +// Resumable returns 'false' for InvalidPrefixErrors +func (i InvalidPrefixError) Resumable() bool { return false } + +// ErrUnsupportedType is returned +// when a bad argument is supplied +// to a function that takes `interface{}`. +type ErrUnsupportedType struct { + T reflect.Type + + ctx string +} + +// Error implements error +func (e *ErrUnsupportedType) Error() string { + out := "msgp: type " + quoteStr(e.T.String()) + " not supported" + if e.ctx != "" { + out += " at " + e.ctx + } + return out +} + +// Resumable returns 'true' for ErrUnsupportedType +func (e *ErrUnsupportedType) Resumable() bool { return true } + +func (e *ErrUnsupportedType) withContext(ctx string) error { + o := *e + o.ctx = addCtx(o.ctx, ctx) + return &o +} + +// simpleQuoteStr is a simplified version of strconv.Quote for TinyGo, +// which takes up a lot less code space by escaping all non-ASCII +// (UTF-8) bytes with \x. Saves about 4k of code size +// (unicode tables, needed for IsPrint(), are big). +// It lives in errors.go just so we can test it in errors_test.go +func simpleQuoteStr(s string) string { + const ( + lowerhex = "0123456789abcdef" + ) + + sb := make([]byte, 0, len(s)+2) + + sb = append(sb, `"`...) + +l: // loop through string bytes (not UTF-8 characters) + for i := 0; i < len(s); i++ { + b := s[i] + // specific escape chars + switch b { + case '\\': + sb = append(sb, `\\`...) + case '"': + sb = append(sb, `\"`...) + case '\a': + sb = append(sb, `\a`...) + case '\b': + sb = append(sb, `\b`...) + case '\f': + sb = append(sb, `\f`...) + case '\n': + sb = append(sb, `\n`...) + case '\r': + sb = append(sb, `\r`...) + case '\t': + sb = append(sb, `\t`...) + case '\v': + sb = append(sb, `\v`...) + default: + // no escaping needed (printable ASCII) + if b >= 0x20 && b <= 0x7E { + sb = append(sb, b) + continue l + } + // anything else is \x + sb = append(sb, `\x`...) + sb = append(sb, lowerhex[byte(b)>>4]) + sb = append(sb, lowerhex[byte(b)&0xF]) + continue l + } + } + + sb = append(sb, `"`...) + return string(sb) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors_default.go b/vendor/github.com/tinylib/msgp/msgp/errors_default.go new file mode 100644 index 000000000000..e45c00a8b818 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors_default.go @@ -0,0 +1,25 @@ +//go:build !tinygo +// +build !tinygo + +package msgp + +import ( + "fmt" + "strconv" +) + +// ctxString converts the incoming interface{} slice into a single string. +func ctxString(ctx []interface{}) string { + out := "" + for idx, cv := range ctx { + if idx > 0 { + out += "/" + } + out += fmt.Sprintf("%v", cv) + } + return out +} + +func quoteStr(s string) string { + return strconv.Quote(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go b/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go new file mode 100644 index 000000000000..8691cd387ea6 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors_tinygo.go @@ -0,0 +1,42 @@ +//go:build tinygo +// +build tinygo + +package msgp + +import ( + "reflect" +) + +// ctxString converts the incoming interface{} slice into a single string, +// without using fmt under tinygo +func ctxString(ctx []interface{}) string { + out := "" + for idx, cv := range ctx { + if idx > 0 { + out += "/" + } + out += ifToStr(cv) + } + return out +} + +type stringer interface { + String() string +} + +func ifToStr(i interface{}) string { + switch v := i.(type) { + case stringer: + return v.String() + case error: + return v.Error() + case string: + return v + default: + return reflect.ValueOf(i).String() + } +} + +func quoteStr(s string) string { + return simpleQuoteStr(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go new file mode 100644 index 000000000000..cda71c984058 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/extension.go @@ -0,0 +1,561 @@ +package msgp + +import ( + "errors" + "math" + "strconv" +) + +const ( + // Complex64Extension is the extension number used for complex64 + Complex64Extension = 3 + + // Complex128Extension is the extension number used for complex128 + Complex128Extension = 4 + + // TimeExtension is the extension number used for time.Time + TimeExtension = 5 + + // MsgTimeExtension is the extension number for timestamps as defined in + // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type + MsgTimeExtension = -1 +) + +// msgTimeExtension is a painful workaround to avoid "constant -1 overflows byte". +var msgTimeExtension = int8(MsgTimeExtension) + +// our extensions live here +var extensionReg = make(map[int8]func() Extension) + +// RegisterExtension registers extensions so that they +// can be initialized and returned by methods that +// decode `interface{}` values. This should only +// be called during initialization. f() should return +// a newly-initialized zero value of the extension. Keep in +// mind that extensions 3, 4, and 5 are reserved for +// complex64, complex128, and time.Time, respectively, +// and that MessagePack reserves extension types from -127 to -1. +// +// For example, if you wanted to register a user-defined struct: +// +// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} }) +// +// RegisterExtension will panic if you call it multiple times +// with the same 'typ' argument, or if you use a reserved +// type (3, 4, or 5). +func RegisterExtension(typ int8, f func() Extension) { + switch typ { + case Complex64Extension, Complex128Extension, TimeExtension: + panic(errors.New("msgp: forbidden extension type: " + strconv.Itoa(int(typ)))) + } + if _, ok := extensionReg[typ]; ok { + panic(errors.New("msgp: RegisterExtension() called with typ " + strconv.Itoa(int(typ)) + " more than once")) + } + extensionReg[typ] = f +} + +// ExtensionTypeError is an error type returned +// when there is a mis-match between an extension type +// and the type encoded on the wire +type ExtensionTypeError struct { + Got int8 + Want int8 +} + +// Error implements the error interface +func (e ExtensionTypeError) Error() string { + return "msgp: error decoding extension: wanted type " + strconv.Itoa(int(e.Want)) + "; got type " + strconv.Itoa(int(e.Got)) +} + +// Resumable returns 'true' for ExtensionTypeErrors +func (e ExtensionTypeError) Resumable() bool { return true } + +func errExt(got int8, wanted int8) error { + return ExtensionTypeError{Got: got, Want: wanted} +} + +// Extension is the interface fulfilled +// by types that want to define their +// own binary encoding. +type Extension interface { + // ExtensionType should return + // a int8 that identifies the concrete + // type of the extension. (Types <0 are + // officially reserved by the MessagePack + // specifications.) + ExtensionType() int8 + + // Len should return the length + // of the data to be encoded + Len() int + + // MarshalBinaryTo should copy + // the data into the supplied slice, + // assuming that the slice has length Len() + MarshalBinaryTo([]byte) error + + UnmarshalBinary([]byte) error +} + +// RawExtension implements the Extension interface +type RawExtension struct { + Data []byte + Type int8 +} + +// ExtensionType implements Extension.ExtensionType, and returns r.Type +func (r *RawExtension) ExtensionType() int8 { return r.Type } + +// Len implements Extension.Len, and returns len(r.Data) +func (r *RawExtension) Len() int { return len(r.Data) } + +// MarshalBinaryTo implements Extension.MarshalBinaryTo, +// and returns a copy of r.Data +func (r *RawExtension) MarshalBinaryTo(d []byte) error { + copy(d, r.Data) + return nil +} + +// UnmarshalBinary implements Extension.UnmarshalBinary, +// and sets r.Data to the contents of the provided slice +func (r *RawExtension) UnmarshalBinary(b []byte) error { + if cap(r.Data) >= len(b) { + r.Data = r.Data[0:len(b)] + } else { + r.Data = make([]byte, len(b)) + } + copy(r.Data, b) + return nil +} + +func (mw *Writer) writeExtensionHeader(length int, extType int8) error { + switch length { + case 0: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 0 + mw.buf[o+2] = byte(extType) + case 1: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext1 + mw.buf[o+1] = byte(extType) + case 2: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext2 + mw.buf[o+1] = byte(extType) + case 4: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext4 + mw.buf[o+1] = byte(extType) + case 8: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = byte(extType) + case 16: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = byte(extType) + default: + switch { + case length < math.MaxUint8: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = byte(uint8(length)) + mw.buf[o+2] = byte(extType) + case length < math.MaxUint16: + o, err := mw.require(4) + if err != nil { + return err + } + mw.buf[o] = mext16 + big.PutUint16(mw.buf[o+1:], uint16(length)) + mw.buf[o+3] = byte(extType) + default: + o, err := mw.require(6) + if err != nil { + return err + } + mw.buf[o] = mext32 + big.PutUint32(mw.buf[o+1:], uint32(length)) + mw.buf[o+5] = byte(extType) + } + } + + return nil +} + +// WriteExtension writes an extension type to the writer +func (mw *Writer) WriteExtension(e Extension) error { + length := e.Len() + + err := mw.writeExtensionHeader(length, e.ExtensionType()) + if err != nil { + return err + } + + // we can only write directly to the + // buffer if we're sure that it + // fits the object + if length <= mw.bufsize() { + o, err := mw.require(length) + if err != nil { + return err + } + return e.MarshalBinaryTo(mw.buf[o:]) + } + // here we create a new buffer + // just large enough for the body + // and save it as the write buffer + err = mw.flush() + if err != nil { + return err + } + buf := make([]byte, length) + err = e.MarshalBinaryTo(buf) + if err != nil { + return err + } + mw.buf = buf + mw.wloc = length + return nil +} + +// WriteExtensionRaw writes an extension type to the writer +func (mw *Writer) WriteExtensionRaw(extType int8, payload []byte) error { + if err := mw.writeExtensionHeader(len(payload), extType); err != nil { + return err + } + + // instead of using mw.Write(), we'll copy the data through the internal + // buffer, otherwise the payload would be moved to the heap + // (meaning we can use stack-allocated buffers with zero allocations) + for len(payload) > 0 { + chunkSize := mw.avail() + if chunkSize == 0 { + if err := mw.flush(); err != nil { + return err + } + chunkSize = mw.avail() + } + if chunkSize > len(payload) { + chunkSize = len(payload) + } + + mw.wloc += copy(mw.buf[mw.wloc:], payload[:chunkSize]) + payload = payload[chunkSize:] + } + + return nil +} + +// peek at the extension type, assuming the next +// kind to be read is Extension +func (m *Reader) peekExtensionType() (int8, error) { + _, _, extType, err := m.peekExtensionHeader() + + return extType, err +} + +// peekExtension peeks at the extension encoding type +// (must guarantee at least 1 byte in 'b') +func peekExtension(b []byte) (int8, error) { + spec := getBytespec(b[0]) + size := spec.size + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, b[0]) + } + if len(b) < int(size) { + return 0, ErrShortBytes + } + // for fixed extensions, + // the type information is in + // the second byte + if spec.extra == constsize { + return int8(b[1]), nil + } + // otherwise, it's in the last + // part of the prefix + return int8(b[size-1]), nil +} + +func (m *Reader) peekExtensionHeader() (offset int, length int, extType int8, err error) { + var p []byte + p, err = m.R.Peek(2) + if err != nil { + return + } + + offset = 2 + + lead := p[0] + switch lead { + case mfixext1: + extType = int8(p[1]) + length = 1 + return + + case mfixext2: + extType = int8(p[1]) + length = 2 + return + + case mfixext4: + extType = int8(p[1]) + length = 4 + return + + case mfixext8: + extType = int8(p[1]) + length = 8 + return + + case mfixext16: + extType = int8(p[1]) + length = 16 + return + + case mext8: + p, err = m.R.Peek(3) + if err != nil { + return + } + offset = 3 + extType = int8(p[2]) + length = int(uint8(p[1])) + + case mext16: + p, err = m.R.Peek(4) + if err != nil { + return + } + offset = 4 + extType = int8(p[3]) + length = int(big.Uint16(p[1:])) + + case mext32: + p, err = m.R.Peek(6) + if err != nil { + return + } + offset = 6 + extType = int8(p[5]) + length = int(big.Uint32(p[1:])) + + default: + err = badPrefix(ExtensionType, lead) + return + } + + return +} + +// ReadExtension reads the next object from the reader +// as an extension. ReadExtension will fail if the next +// object in the stream is not an extension, or if +// e.Type() is not the same as the wire type. +func (m *Reader) ReadExtension(e Extension) error { + offset, length, extType, err := m.peekExtensionHeader() + if err != nil { + return err + } + + if expectedType := e.ExtensionType(); extType != expectedType { + return errExt(extType, expectedType) + } + + p, err := m.R.Peek(offset + length) + if err != nil { + return err + } + err = e.UnmarshalBinary(p[offset:]) + if err == nil { + // consume the peeked bytes + _, err = m.R.Skip(offset + length) + } + return err +} + +// ReadExtensionRaw reads the next object from the reader +// as an extension. The returned slice is only +// valid until the next *Reader method call. +func (m *Reader) ReadExtensionRaw() (int8, []byte, error) { + offset, length, extType, err := m.peekExtensionHeader() + if err != nil { + return 0, nil, err + } + + payload, err := m.R.Next(offset + length) + if err != nil { + return 0, nil, err + } + + return extType, payload[offset:], nil +} + +// AppendExtension appends a MessagePack extension to the provided slice +func AppendExtension(b []byte, e Extension) ([]byte, error) { + l := e.Len() + var o []byte + var n int + switch l { + case 0: + o, n = ensure(b, 3) + o[n] = mext8 + o[n+1] = 0 + o[n+2] = byte(e.ExtensionType()) + return o[:n+3], nil + case 1: + o, n = ensure(b, 3) + o[n] = mfixext1 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 2: + o, n = ensure(b, 4) + o[n] = mfixext2 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 4: + o, n = ensure(b, 6) + o[n] = mfixext4 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 8: + o, n = ensure(b, 10) + o[n] = mfixext8 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 16: + o, n = ensure(b, 18) + o[n] = mfixext16 + o[n+1] = byte(e.ExtensionType()) + n += 2 + default: + switch { + case l < math.MaxUint8: + o, n = ensure(b, l+3) + o[n] = mext8 + o[n+1] = byte(uint8(l)) + o[n+2] = byte(e.ExtensionType()) + n += 3 + case l < math.MaxUint16: + o, n = ensure(b, l+4) + o[n] = mext16 + big.PutUint16(o[n+1:], uint16(l)) + o[n+3] = byte(e.ExtensionType()) + n += 4 + default: + o, n = ensure(b, l+6) + o[n] = mext32 + big.PutUint32(o[n+1:], uint32(l)) + o[n+5] = byte(e.ExtensionType()) + n += 6 + } + } + return o, e.MarshalBinaryTo(o[n:]) +} + +// ReadExtensionBytes reads an extension from 'b' into 'e' +// and returns any remaining bytes. +// Possible errors: +// - ErrShortBytes ('b' not long enough) +// - ExtensionTypeError{} (wire type not the same as e.Type()) +// - TypeError{} (next object not an extension) +// - InvalidPrefixError +// - An umarshal error returned from e.UnmarshalBinary +func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) { + typ, remain, data, err := readExt(b) + if err != nil { + return b, err + } + if typ != e.ExtensionType() { + return b, errExt(typ, e.ExtensionType()) + } + return remain, e.UnmarshalBinary(data) +} + +// readExt will read the extension type, and return remaining bytes, +// as well as the data of the extension. +func readExt(b []byte) (typ int8, remain []byte, data []byte, err error) { + l := len(b) + if l < 3 { + return 0, b, nil, ErrShortBytes + } + lead := b[0] + var ( + sz int // size of 'data' + off int // offset of 'data' + ) + switch lead { + case mfixext1: + typ = int8(b[1]) + sz = 1 + off = 2 + case mfixext2: + typ = int8(b[1]) + sz = 2 + off = 2 + case mfixext4: + typ = int8(b[1]) + sz = 4 + off = 2 + case mfixext8: + typ = int8(b[1]) + sz = 8 + off = 2 + case mfixext16: + typ = int8(b[1]) + sz = 16 + off = 2 + case mext8: + sz = int(uint8(b[1])) + typ = int8(b[2]) + off = 3 + if sz == 0 { + return typ, b[3:], b[3:3], nil + } + case mext16: + if l < 4 { + return 0, b, nil, ErrShortBytes + } + sz = int(big.Uint16(b[1:])) + typ = int8(b[3]) + off = 4 + case mext32: + if l < 6 { + return 0, b, nil, ErrShortBytes + } + sz = int(big.Uint32(b[1:])) + typ = int8(b[5]) + off = 6 + default: + return 0, b, nil, badPrefix(ExtensionType, lead) + } + // the data of the extension starts + // at 'off' and is 'sz' bytes long + tot := off + sz + if len(b[off:]) < sz { + return 0, b, nil, ErrShortBytes + } + return typ, b[tot:], b[off:tot:tot], nil +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go new file mode 100644 index 000000000000..a6d91ede14e5 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file.go @@ -0,0 +1,93 @@ +//go:build (linux || darwin || dragonfly || freebsd || illumos || netbsd || openbsd) && !appengine && !tinygo +// +build linux darwin dragonfly freebsd illumos netbsd openbsd +// +build !appengine +// +build !tinygo + +package msgp + +import ( + "os" + "syscall" +) + +// ReadFile reads a file into 'dst' using +// a read-only memory mapping. Consequently, +// the file must be mmap-able, and the +// Unmarshaler should never write to +// the source memory. (Methods generated +// by the msgp tool obey that constraint, but +// user-defined implementations may not.) +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. +func ReadFile(dst Unmarshaler, file *os.File) error { + stat, err := file.Stat() + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseRead(data) + _, err = dst.UnmarshalMsg(data) + uerr := syscall.Munmap(data) + if err == nil { + err = uerr + } + return err +} + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +// WriteFile writes a file from 'src' using +// memory mapping. It overwrites the entire +// contents of the previous file. +// The mapping size is calculated +// using the `Msgsize()` method +// of 'src', so it must produce a result +// equal to or greater than the actual encoded +// size of the object. Otherwise, +// a fault (SIGBUS) will occur. +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. +// +// NOTE: The performance of this call +// is highly OS- and filesystem-dependent. +// Users should take care to test that this +// performs as expected in a production environment. +// (Linux users should run a kernel and filesystem +// that support fallocate(2) for the best results.) +func WriteFile(src MarshalSizer, file *os.File) error { + sz := src.Msgsize() + err := fallocate(file, int64(sz)) + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseWrite(data) + chunk := data[:0] + chunk, err = src.MarshalMsg(chunk) + if err != nil { + return err + } + uerr := syscall.Munmap(data) + if uerr != nil { + return uerr + } + return file.Truncate(int64(len(chunk))) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go new file mode 100644 index 000000000000..dac0dba3fa6b --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go @@ -0,0 +1,48 @@ +//go:build windows || appengine || tinygo +// +build windows appengine tinygo + +package msgp + +import ( + "io" + "os" +) + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +func ReadFile(dst Unmarshaler, file *os.File) error { + if u, ok := dst.(Decodable); ok { + return u.DecodeMsg(NewReader(file)) + } + + data, err := io.ReadAll(file) + if err != nil { + return err + } + _, err = dst.UnmarshalMsg(data) + return err +} + +func WriteFile(src MarshalSizer, file *os.File) error { + if e, ok := src.(Encodable); ok { + w := NewWriter(file) + err := e.EncodeMsg(w) + if err == nil { + err = w.Flush() + } + return err + } + + raw, err := src.MarshalMsg(nil) + if err != nil { + return err + } + _, err = file.Write(raw) + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go new file mode 100644 index 000000000000..d07a5fba7f37 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/integers.go @@ -0,0 +1,199 @@ +package msgp + +import "encoding/binary" + +/* ---------------------------------- + integer encoding utilities + (inline-able) + + TODO(tinylib): there are faster, + albeit non-portable solutions + to the code below. implement + byteswap? + ---------------------------------- */ + +func putMint64(b []byte, i int64) { + _ = b[8] // bounds check elimination + + b[0] = mint64 + b[1] = byte(i >> 56) + b[2] = byte(i >> 48) + b[3] = byte(i >> 40) + b[4] = byte(i >> 32) + b[5] = byte(i >> 24) + b[6] = byte(i >> 16) + b[7] = byte(i >> 8) + b[8] = byte(i) +} + +func getMint64(b []byte) int64 { + _ = b[8] // bounds check elimination + + return (int64(b[1]) << 56) | (int64(b[2]) << 48) | + (int64(b[3]) << 40) | (int64(b[4]) << 32) | + (int64(b[5]) << 24) | (int64(b[6]) << 16) | + (int64(b[7]) << 8) | (int64(b[8])) +} + +func putMint32(b []byte, i int32) { + _ = b[4] // bounds check elimination + + b[0] = mint32 + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) +} + +func getMint32(b []byte) int32 { + _ = b[4] // bounds check elimination + + return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) +} + +func putMint16(b []byte, i int16) { + _ = b[2] // bounds check elimination + + b[0] = mint16 + b[1] = byte(i >> 8) + b[2] = byte(i) +} + +func getMint16(b []byte) (i int16) { + _ = b[2] // bounds check elimination + + return (int16(b[1]) << 8) | int16(b[2]) +} + +func putMint8(b []byte, i int8) { + _ = b[1] // bounds check elimination + + b[0] = mint8 + b[1] = byte(i) +} + +func getMint8(b []byte) (i int8) { + return int8(b[1]) +} + +func putMuint64(b []byte, u uint64) { + _ = b[8] // bounds check elimination + + b[0] = muint64 + b[1] = byte(u >> 56) + b[2] = byte(u >> 48) + b[3] = byte(u >> 40) + b[4] = byte(u >> 32) + b[5] = byte(u >> 24) + b[6] = byte(u >> 16) + b[7] = byte(u >> 8) + b[8] = byte(u) +} + +func getMuint64(b []byte) uint64 { + _ = b[8] // bounds check elimination + + return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | + (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | + (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | + (uint64(b[7]) << 8) | (uint64(b[8])) +} + +func putMuint32(b []byte, u uint32) { + _ = b[4] // bounds check elimination + + b[0] = muint32 + b[1] = byte(u >> 24) + b[2] = byte(u >> 16) + b[3] = byte(u >> 8) + b[4] = byte(u) +} + +func getMuint32(b []byte) uint32 { + _ = b[4] // bounds check elimination + + return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) +} + +func putMuint16(b []byte, u uint16) { + _ = b[2] // bounds check elimination + + b[0] = muint16 + b[1] = byte(u >> 8) + b[2] = byte(u) +} + +func getMuint16(b []byte) uint16 { + _ = b[2] // bounds check elimination + + return (uint16(b[1]) << 8) | uint16(b[2]) +} + +func putMuint8(b []byte, u uint8) { + _ = b[1] // bounds check elimination + + b[0] = muint8 + b[1] = byte(u) +} + +func getMuint8(b []byte) uint8 { + return uint8(b[1]) +} + +func getUnix(b []byte) (sec int64, nsec int32) { + sec = int64(binary.BigEndian.Uint64(b)) + nsec = int32(binary.BigEndian.Uint32(b[8:])) + + return +} + +func putUnix(b []byte, sec int64, nsec int32) { + binary.BigEndian.PutUint64(b, uint64(sec)) + binary.BigEndian.PutUint32(b[8:], uint32(nsec)) +} + +/* ----------------------------- + prefix utilities + ----------------------------- */ + +// write prefix and uint8 +func prefixu8(b []byte, pre byte, sz uint8) { + _ = b[1] // bounds check elimination + + b[0] = pre + b[1] = byte(sz) +} + +// write prefix and big-endian uint16 +func prefixu16(b []byte, pre byte, sz uint16) { + _ = b[2] // bounds check elimination + + b[0] = pre + b[1] = byte(sz >> 8) + b[2] = byte(sz) +} + +// write prefix and big-endian uint32 +func prefixu32(b []byte, pre byte, sz uint32) { + _ = b[4] // bounds check elimination + + b[0] = pre + b[1] = byte(sz >> 24) + b[2] = byte(sz >> 16) + b[3] = byte(sz >> 8) + b[4] = byte(sz) +} + +func prefixu64(b []byte, pre byte, sz uint64) { + _ = b[8] // bounds check elimination + + b[0] = pre + b[1] = byte(sz >> 56) + b[2] = byte(sz >> 48) + b[3] = byte(sz >> 40) + b[4] = byte(sz >> 32) + b[5] = byte(sz >> 24) + b[6] = byte(sz >> 16) + b[7] = byte(sz >> 8) + b[8] = byte(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go new file mode 100644 index 000000000000..18593f64d5ad --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -0,0 +1,580 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "unicode/utf8" +) + +var ( + null = []byte("null") + hex = []byte("0123456789abcdef") +) + +var defuns [_maxtype]func(jsWriter, *Reader) (int, error) + +// note: there is an initialization loop if +// this isn't set up during init() +func init() { + // since none of these functions are inline-able, + // there is not much of a penalty to the indirect + // call. however, this is best expressed as a jump-table... + defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ + StrType: rwString, + BinType: rwBytes, + MapType: rwMap, + ArrayType: rwArray, + Float64Type: rwFloat64, + Float32Type: rwFloat32, + BoolType: rwBool, + IntType: rwInt, + UintType: rwUint, + NilType: rwNil, + ExtensionType: rwExtension, + Complex64Type: rwExtension, + Complex128Type: rwExtension, + TimeType: rwTime, + } +} + +// this is the interface +// used to write json +type jsWriter interface { + io.Writer + io.ByteWriter + WriteString(string) (int, error) +} + +// CopyToJSON reads MessagePack from 'src' and copies it +// as JSON to 'dst' until EOF. +func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { + r := NewReader(src) + n, err = r.WriteToJSON(dst) + freeR(r) + return +} + +// WriteToJSON translates MessagePack from 'r' and writes it as +// JSON to 'w' until the underlying reader returns io.EOF. It returns +// the number of bytes written, and an error if it stopped before EOF. +func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) { + var j jsWriter + var bf *bufio.Writer + if jsw, ok := w.(jsWriter); ok { + j = jsw + } else { + bf = bufio.NewWriter(w) + j = bf + } + var nn int + for err == nil { + nn, err = rwNext(j, r) + n += int64(nn) + } + if err != io.EOF { + if bf != nil { + bf.Flush() + } + return + } + err = nil + if bf != nil { + err = bf.Flush() + } + return +} + +func rwNext(w jsWriter, src *Reader) (int, error) { + t, err := src.NextType() + if err != nil { + return 0, err + } + return defuns[t](w, src) +} + +func rwMap(dst jsWriter, src *Reader) (n int, err error) { + var comma bool + var sz uint32 + var field []byte + + sz, err = src.ReadMapHeader() + if err != nil { + return + } + + if sz == 0 { + return dst.WriteString("{}") + } + + // This is potentially a recursive call. + if done, err := src.recursiveCall(); err != nil { + return 0, err + } else { + defer done() + } + + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + var nn int + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + + field, err = src.ReadMapKeyPtr() + if err != nil { + return + } + nn, err = rwquoted(dst, field) + n += nn + if err != nil { + return + } + + err = dst.WriteByte(':') + if err != nil { + return + } + n++ + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + if !comma { + comma = true + } + } + + err = dst.WriteByte('}') + if err != nil { + return + } + n++ + return +} + +func rwArray(dst jsWriter, src *Reader) (n int, err error) { + err = dst.WriteByte('[') + if err != nil { + return + } + // This is potentially a recursive call. + if done, err := src.recursiveCall(); err != nil { + return 0, err + } else { + defer done() + } + + var sz uint32 + var nn int + sz, err = src.ReadArrayHeader() + if err != nil { + return + } + comma := false + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + comma = true + } + + err = dst.WriteByte(']') + if err != nil { + return + } + n++ + return +} + +func rwNil(dst jsWriter, src *Reader) (int, error) { + err := src.ReadNil() + if err != nil { + return 0, err + } + return dst.Write(null) +} + +func rwFloat32(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat32() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 32) + return dst.Write(src.scratch) +} + +func rwFloat64(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 64) + return dst.Write(src.scratch) +} + +func rwInt(dst jsWriter, src *Reader) (int, error) { + i, err := src.ReadInt64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendInt(src.scratch[:0], i, 10) + return dst.Write(src.scratch) +} + +func rwUint(dst jsWriter, src *Reader) (int, error) { + u, err := src.ReadUint64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendUint(src.scratch[:0], u, 10) + return dst.Write(src.scratch) +} + +func rwBool(dst jsWriter, src *Reader) (int, error) { + b, err := src.ReadBool() + if err != nil { + return 0, err + } + if b { + return dst.WriteString("true") + } + return dst.WriteString("false") +} + +func rwTime(dst jsWriter, src *Reader) (int, error) { + t, err := src.ReadTime() + if err != nil { + return 0, err + } + bts, err := t.MarshalJSON() + if err != nil { + return 0, err + } + return dst.Write(bts) +} + +func rwExtension(dst jsWriter, src *Reader) (n int, err error) { + et, err := src.peekExtensionType() + if err != nil { + return 0, err + } + + // registered extensions can override + // the JSON encoding + if j, ok := extensionReg[et]; ok { + var bts []byte + e := j() + err = src.ReadExtension(e) + if err != nil { + return + } + bts, err = json.Marshal(e) + if err != nil { + return + } + return dst.Write(bts) + } + + e := RawExtension{} + e.Type = et + err = src.ReadExtension(&e) + if err != nil { + return + } + + var nn int + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + + nn, err = dst.WriteString(`"type":`) + n += nn + if err != nil { + return + } + + src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10) + nn, err = dst.Write(src.scratch) + n += nn + if err != nil { + return + } + + nn, err = dst.WriteString(`,"data":"`) + n += nn + if err != nil { + return + } + + enc := base64.NewEncoder(base64.StdEncoding, dst) + + nn, err = enc.Write(e.Data) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + nn, err = dst.WriteString(`"}`) + n += nn + return +} + +func rwString(dst jsWriter, src *Reader) (n int, err error) { + lead, err := src.R.PeekByte() + if err != nil { + return + } + var read int + var p []byte + if isfixstr(lead) { + read = int(rfixstr(lead)) + src.R.Skip(1) + goto write + } + + switch lead { + case mstr8: + p, err = src.R.Next(2) + if err != nil { + return + } + read = int(uint8(p[1])) + case mstr16: + p, err = src.R.Next(3) + if err != nil { + return + } + read = int(big.Uint16(p[1:])) + case mstr32: + p, err = src.R.Next(5) + if err != nil { + return + } + read = int(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +write: + p, err = src.R.Next(read) + if err != nil { + return + } + n, err = rwquoted(dst, p) + return +} + +func rwBytes(dst jsWriter, src *Reader) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + src.scratch, err = src.ReadBytes(src.scratch[:0]) + if err != nil { + return + } + enc := base64.NewEncoder(base64.StdEncoding, dst) + nn, err = enc.Write(src.scratch) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} + +// Below (c) The Go Authors, 2009-2014 +// Subject to the BSD-style license found at http://golang.org +// +// see: encoding/json/encode.go:(*encodeState).stringbytes() +func rwquoted(dst jsWriter, s []byte) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + switch b { + case '\\', '"': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte(b) + if err != nil { + return + } + n++ + case '\n': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('n') + if err != nil { + return + } + n++ + case '\r': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('r') + if err != nil { + return + } + n++ + case '\t': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('t') + if err != nil { + return + } + n++ + default: + // This encodes bytes < 0x20 except for \t, \n and \r. + // It also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + nn, err = dst.WriteString(`\u00`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[b>>4]) + if err != nil { + return + } + n++ + err = dst.WriteByte(hex[b&0xF]) + if err != nil { + return + } + n++ + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\ufffd`) + n += nn + if err != nil { + return + } + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + nn, err = dst.WriteString(`\u202`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[c&0xF]) + if err != nil { + return + } + n++ + i += size + start = i + continue + } + i += size + } + if start < len(s) { + nn, err = dst.Write(s[start:]) + n += nn + if err != nil { + return + } + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go new file mode 100644 index 000000000000..d4fbda631538 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go @@ -0,0 +1,347 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "time" +) + +var unfuns [_maxtype]func(jsWriter, []byte, []byte, int) ([]byte, []byte, error) + +func init() { + // NOTE(pmh): this is best expressed as a jump table, + // but gc doesn't do that yet. revisit post-go1.5. + unfuns = [_maxtype]func(jsWriter, []byte, []byte, int) ([]byte, []byte, error){ + StrType: rwStringBytes, + BinType: rwBytesBytes, + MapType: rwMapBytes, + ArrayType: rwArrayBytes, + Float64Type: rwFloat64Bytes, + Float32Type: rwFloat32Bytes, + BoolType: rwBoolBytes, + IntType: rwIntBytes, + UintType: rwUintBytes, + NilType: rwNullBytes, + ExtensionType: rwExtensionBytes, + Complex64Type: rwExtensionBytes, + Complex128Type: rwExtensionBytes, + TimeType: rwTimeBytes, + } +} + +// UnmarshalAsJSON takes raw messagepack and writes +// it as JSON to 'w'. If an error is returned, the +// bytes not translated will also be returned. If +// no errors are encountered, the length of the returned +// slice will be zero. +func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { + var ( + scratch []byte + cast bool + dst jsWriter + err error + ) + if jsw, ok := w.(jsWriter); ok { + dst = jsw + cast = true + } else { + dst = bufio.NewWriterSize(w, 512) + } + for len(msg) > 0 && err == nil { + msg, scratch, err = writeNext(dst, msg, scratch, 0) + } + if !cast && err == nil { + err = dst.(*bufio.Writer).Flush() + } + return msg, err +} + +func writeNext(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + if len(msg) < 1 { + return msg, scratch, ErrShortBytes + } + t := getType(msg[0]) + if t == InvalidType { + return msg, scratch, InvalidPrefixError(msg[0]) + } + if t == ExtensionType { + et, err := peekExtension(msg) + if err != nil { + return nil, scratch, err + } + if et == TimeExtension || et == MsgTimeExtension { + t = TimeType + } + } + return unfuns[t](w, msg, scratch, depth) +} + +func rwArrayBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + if depth >= recursionLimit { + return msg, scratch, ErrRecursion + } + sz, msg, err := ReadArrayHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('[') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = writeNext(w, msg, scratch, depth+1) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte(']') + return msg, scratch, err +} + +func rwMapBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + if depth >= recursionLimit { + return msg, scratch, ErrRecursion + } + sz, msg, err := ReadMapHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('{') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = rwMapKeyBytes(w, msg, scratch, depth) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte(':') + if err != nil { + return msg, scratch, err + } + msg, scratch, err = writeNext(w, msg, scratch, depth+1) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte('}') + return msg, scratch, err +} + +func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + msg, scratch, err := rwStringBytes(w, msg, scratch, depth) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return rwBytesBytes(w, msg, scratch, depth) + } + } + return msg, scratch, err +} + +func rwStringBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + str, msg, err := ReadStringZC(msg) + if err != nil { + return msg, scratch, err + } + _, err = rwquoted(w, str) + return msg, scratch, err +} + +func rwBytesBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + bts, msg, err := ReadBytesZC(msg) + if err != nil { + return msg, scratch, err + } + l := base64.StdEncoding.EncodedLen(len(bts)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, bts) + err = w.WriteByte('"') + if err != nil { + return msg, scratch, err + } + _, err = w.Write(scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('"') + return msg, scratch, err +} + +func rwNullBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + msg, err := ReadNilBytes(msg) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(null) + return msg, scratch, err +} + +func rwBoolBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + b, msg, err := ReadBoolBytes(msg) + if err != nil { + return msg, scratch, err + } + if b { + _, err = w.WriteString("true") + return msg, scratch, err + } + _, err = w.WriteString("false") + return msg, scratch, err +} + +func rwIntBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + i, msg, err := ReadInt64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], i, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwUintBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + u, msg, err := ReadUint64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendUint(scratch[0:0], u, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + var f float32 + var err error + f, msg, err = ReadFloat32Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + var f float64 + var err error + f, msg, err = ReadFloat64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwTimeBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + var t time.Time + var err error + t, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := t.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err +} + +func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + var err error + var et int8 + et, err = peekExtension(msg) + if err != nil { + return msg, scratch, err + } + + // if it's time.Time + if et == TimeExtension || et == MsgTimeExtension { + var tm time.Time + tm, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := tm.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // if the extension is registered, + // use its canonical JSON form + if f, ok := extensionReg[et]; ok { + e := f() + msg, err = ReadExtensionBytes(msg, e) + if err != nil { + return msg, scratch, err + } + bts, err := json.Marshal(e) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // otherwise, write `{"type": , "data": ""}` + r := RawExtension{} + r.Type = et + msg, err = ReadExtensionBytes(msg, &r) + if err != nil { + return msg, scratch, err + } + scratch, err = writeExt(w, r, scratch) + return msg, scratch, err +} + +func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { + _, err := w.WriteString(`{"type":`) + if err != nil { + return scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) + _, err = w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`,"data":"`) + if err != nil { + return scratch, err + } + l := base64.StdEncoding.EncodedLen(len(r.Data)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, r.Data) + _, err = w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`"}`) + return scratch, err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go new file mode 100644 index 000000000000..edfe328b4461 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/number.go @@ -0,0 +1,266 @@ +package msgp + +import ( + "math" + "strconv" +) + +// The portable parts of the Number implementation + +// Number can be +// an int64, uint64, float32, +// or float64 internally. +// It can decode itself +// from any of the native +// messagepack number types. +// The zero-value of Number +// is Int(0). Using the equality +// operator with Number compares +// both the type and the value +// of the number. +type Number struct { + // internally, this + // is just a tagged union. + // the raw bits of the number + // are stored the same way regardless. + bits uint64 + typ Type +} + +// AsInt sets the number to an int64. +func (n *Number) AsInt(i int64) { + // we always store int(0) + // as {0, InvalidType} in + // order to preserve + // the behavior of the == operator + if i == 0 { + n.typ = InvalidType + n.bits = 0 + return + } + + n.typ = IntType + n.bits = uint64(i) +} + +// AsUint sets the number to a uint64. +func (n *Number) AsUint(u uint64) { + n.typ = UintType + n.bits = u +} + +// AsFloat32 sets the value of the number +// to a float32. +func (n *Number) AsFloat32(f float32) { + n.typ = Float32Type + n.bits = uint64(math.Float32bits(f)) +} + +// AsFloat64 sets the value of the +// number to a float64. +func (n *Number) AsFloat64(f float64) { + n.typ = Float64Type + n.bits = math.Float64bits(f) +} + +// Int casts the number as an int64, and +// returns whether or not that was the +// underlying type. +func (n *Number) Int() (int64, bool) { + return int64(n.bits), n.typ == IntType || n.typ == InvalidType +} + +// Uint casts the number as a uint64, and returns +// whether or not that was the underlying type. +func (n *Number) Uint() (uint64, bool) { + return n.bits, n.typ == UintType +} + +// Float casts the number to a float64, and +// returns whether or not that was the underlying +// type (either a float64 or a float32). +func (n *Number) Float() (float64, bool) { + switch n.typ { + case Float32Type: + return float64(math.Float32frombits(uint32(n.bits))), true + case Float64Type: + return math.Float64frombits(n.bits), true + default: + return 0.0, false + } +} + +// Type will return one of: +// Float64Type, Float32Type, UintType, or IntType. +func (n *Number) Type() Type { + if n.typ == InvalidType { + return IntType + } + return n.typ +} + +// DecodeMsg implements msgp.Decodable +func (n *Number) DecodeMsg(r *Reader) error { + typ, err := r.NextType() + if err != nil { + return err + } + switch typ { + case Float32Type: + f, err := r.ReadFloat32() + if err != nil { + return err + } + n.AsFloat32(f) + return nil + case Float64Type: + f, err := r.ReadFloat64() + if err != nil { + return err + } + n.AsFloat64(f) + return nil + case IntType: + i, err := r.ReadInt64() + if err != nil { + return err + } + n.AsInt(i) + return nil + case UintType: + u, err := r.ReadUint64() + if err != nil { + return err + } + n.AsUint(u) + return nil + default: + return TypeError{Encoded: typ, Method: IntType} + } +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { + typ := NextType(b) + switch typ { + case IntType: + i, o, err := ReadInt64Bytes(b) + if err != nil { + return b, err + } + n.AsInt(i) + return o, nil + case UintType: + u, o, err := ReadUint64Bytes(b) + if err != nil { + return b, err + } + n.AsUint(u) + return o, nil + case Float64Type: + f, o, err := ReadFloat64Bytes(b) + if err != nil { + return b, err + } + n.AsFloat64(f) + return o, nil + case Float32Type: + f, o, err := ReadFloat32Bytes(b) + if err != nil { + return b, err + } + n.AsFloat32(f) + return o, nil + default: + return b, TypeError{Method: IntType, Encoded: typ} + } +} + +// MarshalMsg implements msgp.Marshaler +func (n *Number) MarshalMsg(b []byte) ([]byte, error) { + switch n.typ { + case IntType: + return AppendInt64(b, int64(n.bits)), nil + case UintType: + return AppendUint64(b, uint64(n.bits)), nil + case Float64Type: + return AppendFloat64(b, math.Float64frombits(n.bits)), nil + case Float32Type: + return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil + default: + return AppendInt64(b, 0), nil + } +} + +// EncodeMsg implements msgp.Encodable +func (n *Number) EncodeMsg(w *Writer) error { + switch n.typ { + case IntType: + return w.WriteInt64(int64(n.bits)) + case UintType: + return w.WriteUint64(n.bits) + case Float64Type: + return w.WriteFloat64(math.Float64frombits(n.bits)) + case Float32Type: + return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) + default: + return w.WriteInt64(0) + } +} + +// Msgsize implements msgp.Sizer +func (n *Number) Msgsize() int { + switch n.typ { + case Float32Type: + return Float32Size + case Float64Type: + return Float64Size + case IntType: + return Int64Size + case UintType: + return Uint64Size + default: + return 1 // fixint(0) + } +} + +// MarshalJSON implements json.Marshaler +func (n *Number) MarshalJSON() ([]byte, error) { + t := n.Type() + if t == InvalidType { + return []byte{'0'}, nil + } + out := make([]byte, 0, 32) + switch t { + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.AppendFloat(out, f, 'f', -1, 64), nil + case IntType: + i, _ := n.Int() + return strconv.AppendInt(out, i, 10), nil + case UintType: + u, _ := n.Uint() + return strconv.AppendUint(out, u, 10), nil + default: + panic("(*Number).typ is invalid") + } +} + +// String implements fmt.Stringer +func (n *Number) String() string { + switch n.typ { + case InvalidType: + return "0" + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.FormatFloat(f, 'f', -1, 64) + case IntType: + i, _ := n.Int() + return strconv.FormatInt(i, 10) + case UintType: + u, _ := n.Uint() + return strconv.FormatUint(u, 10) + default: + panic("(*Number).typ is invalid") + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go new file mode 100644 index 000000000000..fe8723412bf9 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/purego.go @@ -0,0 +1,16 @@ +//go:build (purego && !unsafe) || appengine +// +build purego,!unsafe appengine + +package msgp + +// let's just assume appengine +// uses 64-bit hardware... +const smallint = false + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go new file mode 100644 index 000000000000..20d3463bbd44 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read.go @@ -0,0 +1,1494 @@ +package msgp + +import ( + "encoding/binary" + "encoding/json" + "io" + "math" + "strconv" + "sync" + "time" + + "github.com/philhofer/fwd" +) + +// where we keep old *Readers +var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} + +// Type is a MessagePack wire type, +// including this package's built-in +// extension types. +type Type byte + +// MessagePack Types +// +// The zero value of Type +// is InvalidType. +const ( + InvalidType Type = iota + + // MessagePack built-in types + + StrType + BinType + MapType + ArrayType + Float64Type + Float32Type + BoolType + IntType + UintType + NilType + DurationType + ExtensionType + + // pseudo-types provided + // by extensions + + Complex64Type + Complex128Type + TimeType + NumberType + + _maxtype +) + +// String implements fmt.Stringer +func (t Type) String() string { + switch t { + case StrType: + return "str" + case BinType: + return "bin" + case MapType: + return "map" + case ArrayType: + return "array" + case Float64Type: + return "float64" + case Float32Type: + return "float32" + case BoolType: + return "bool" + case UintType: + return "uint" + case IntType: + return "int" + case ExtensionType: + return "ext" + case NilType: + return "nil" + case NumberType: + return "number" + default: + return "" + } +} + +func freeR(m *Reader) { + readerPool.Put(m) +} + +// Unmarshaler is the interface fulfilled +// by objects that know how to unmarshal +// themselves from MessagePack. +// UnmarshalMsg unmarshals the object +// from binary, returing any leftover +// bytes and any errors encountered. +type Unmarshaler interface { + UnmarshalMsg([]byte) ([]byte, error) +} + +// Decodable is the interface fulfilled +// by objects that know how to read +// themselves from a *Reader. +type Decodable interface { + DecodeMsg(*Reader) error +} + +// Decode decodes 'd' from 'r'. +func Decode(r io.Reader, d Decodable) error { + rd := NewReader(r) + err := d.DecodeMsg(rd) + freeR(rd) + return err +} + +// NewReader returns a *Reader that +// reads from the provided reader. The +// reader will be buffered. +func NewReader(r io.Reader) *Reader { + p := readerPool.Get().(*Reader) + if p.R == nil { + p.R = fwd.NewReader(r) + } else { + p.R.Reset(r) + } + return p +} + +// NewReaderSize returns a *Reader with a buffer of the given size. +// (This is vastly preferable to passing the decoder a reader that is already buffered.) +func NewReaderSize(r io.Reader, sz int) *Reader { + return &Reader{R: fwd.NewReaderSize(r, sz)} +} + +// NewReaderBuf returns a *Reader with a provided buffer. +func NewReaderBuf(r io.Reader, buf []byte) *Reader { + return &Reader{R: fwd.NewReaderBuf(r, buf)} +} + +// Reader wraps an io.Reader and provides +// methods to read MessagePack-encoded values +// from it. Readers are buffered. +type Reader struct { + // R is the buffered reader + // that the Reader uses + // to decode MessagePack. + // The Reader itself + // is stateless; all the + // buffering is done + // within R. + R *fwd.Reader + scratch []byte + recursionDepth int +} + +// Read implements `io.Reader` +func (m *Reader) Read(p []byte) (int, error) { + return m.R.Read(p) +} + +// CopyNext reads the next object from m without decoding it and writes it to w. +// It avoids unnecessary copies internally. +func (m *Reader) CopyNext(w io.Writer) (int64, error) { + sz, o, err := getNextSize(m.R) + if err != nil { + return 0, err + } + + var n int64 + // Opportunistic optimization: if we can fit the whole thing in the m.R + // buffer, then just get a pointer to that, and pass it to w.Write, + // avoiding an allocation. + if int(sz) <= m.R.BufferSize() { + var nn int + var buf []byte + buf, err = m.R.Next(int(sz)) + if err != nil { + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + return 0, err + } + nn, err = w.Write(buf) + n += int64(nn) + } else { + // Fall back to io.CopyN. + // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer) + n, err = io.CopyN(w, m.R, int64(sz)) + if err == io.ErrUnexpectedEOF { + err = ErrShortBytes + } + } + if err != nil { + return n, err + } else if n < int64(sz) { + return n, io.ErrShortWrite + } + + if done, err := m.recursiveCall(); err != nil { + return n, err + } else { + defer done() + } + // for maps and slices, read elements + for x := uintptr(0); x < o; x++ { + var n2 int64 + n2, err = m.CopyNext(w) + if err != nil { + return n, err + } + n += n2 + } + return n, nil +} + +// recursiveCall will increment the recursion depth and return an error if it is exceeded. +// If a nil error is returned, done must be called to decrement the counter. +func (m *Reader) recursiveCall() (done func(), err error) { + if m.recursionDepth >= recursionLimit { + return func() {}, ErrRecursion + } + m.recursionDepth++ + return func() { + m.recursionDepth-- + }, nil +} + +// ReadFull implements `io.ReadFull` +func (m *Reader) ReadFull(p []byte) (int, error) { + return m.R.ReadFull(p) +} + +// Reset resets the underlying reader. +func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } + +// Buffered returns the number of bytes currently in the read buffer. +func (m *Reader) Buffered() int { return m.R.Buffered() } + +// BufferSize returns the capacity of the read buffer. +func (m *Reader) BufferSize() int { return m.R.BufferSize() } + +// NextType returns the next object type to be decoded. +func (m *Reader) NextType() (Type, error) { + next, err := m.R.PeekByte() + if err != nil { + return InvalidType, err + } + t := getType(next) + if t == InvalidType { + return t, InvalidPrefixError(next) + } + if t == ExtensionType { + v, err := m.peekExtensionType() + if err != nil { + return InvalidType, err + } + switch v { + case Complex64Extension: + return Complex64Type, nil + case Complex128Extension: + return Complex128Type, nil + case TimeExtension, MsgTimeExtension: + return TimeType, nil + } + } + return t, nil +} + +// IsNil returns whether or not +// the next byte is a null messagepack byte +func (m *Reader) IsNil() bool { + p, err := m.R.PeekByte() + return err == nil && p == mnil +} + +// getNextSize returns the size of the next object on the wire. +// returns (obj size, obj elements, error) +// only maps and arrays have non-zero obj elements +// for maps and arrays, obj size does not include elements +// +// use uintptr b/c it's guaranteed to be large enough +// to hold whatever we can fit in memory. +func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { + lead, err := r.PeekByte() + if err != nil { + return 0, 0, err + } + spec := getBytespec(lead) + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { + return uintptr(size), uintptr(mode), nil + } + b, err := r.Peek(int(size)) + if err != nil { + return 0, 0, err + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} + +// Skip skips over the next object, regardless of +// its type. If it is an array or map, the whole array +// or map will be skipped. +func (m *Reader) Skip() error { + var ( + v uintptr // bytes + o uintptr // objects + err error + p []byte + ) + + // we can use the faster + // method if we have enough + // buffered data + if m.R.Buffered() >= 5 { + p, err = m.R.Peek(5) + if err != nil { + return err + } + v, o, err = getSize(p) + if err != nil { + return err + } + } else { + v, o, err = getNextSize(m.R) + if err != nil { + return err + } + } + + // 'v' is always non-zero + // if err == nil + _, err = m.R.Skip(int(v)) + if err != nil { + return err + } + + // for maps and slices, skip elements with recursive call + if done, err := m.recursiveCall(); err != nil { + return err + } else { + defer done() + } + for x := uintptr(0); x < o; x++ { + err = m.Skip() + if err != nil { + return err + } + } + return nil +} + +// ReadMapHeader reads the next object +// as a map header and returns the size +// of the map and the number of bytes written. +// It will return a TypeError{} if the next +// object is not a map. +func (m *Reader) ReadMapHeader() (sz uint32, err error) { + var p []byte + var lead byte + lead, err = m.R.PeekByte() + if err != nil { + return + } + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mmap16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mmap32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKey reads either a 'str' or 'bin' field from +// the reader and returns the value as a []byte. It uses +// scratch for storage if it is large enough. +func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { + out, err := m.ReadStringAsBytes(scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return m.ReadBytes(scratch) + } + return nil, err + } + return out, nil +} + +// ReadMapKeyPtr returns a []byte pointing to the contents +// of a valid map key. The key cannot be empty, and it +// must be shorter than the total buffer size of the +// *Reader. Additionally, the returned slice is only +// valid until the next *Reader method call. Users +// should exercise extreme care when using this +// method; writing into the returned slice may +// corrupt future reads. +func (m *Reader) ReadMapKeyPtr() ([]byte, error) { + lead, err := m.R.PeekByte() + if err != nil { + return nil, err + } + var read int + var p []byte + if isfixstr(lead) { + read = int(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + switch lead { + case mstr8, mbin8: + p, err = m.R.Next(2) + if err != nil { + return nil, err + } + read = int(p[1]) + case mstr16, mbin16: + p, err = m.R.Next(3) + if err != nil { + return nil, err + } + read = int(big.Uint16(p[1:])) + case mstr32, mbin32: + p, err = m.R.Next(5) + if err != nil { + return nil, err + } + read = int(big.Uint32(p[1:])) + default: + return nil, badPrefix(StrType, lead) + } +fill: + if read == 0 { + return nil, ErrShortBytes + } + return m.R.Next(read) +} + +// ReadArrayHeader reads the next object as an +// array header and returns the size of the array +// and the number of bytes read. +func (m *Reader) ReadArrayHeader() (sz uint32, err error) { + lead, err := m.R.PeekByte() + if err != nil { + return + } + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + _, err = m.R.Skip(1) + return + } + var p []byte + switch lead { + case marray16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + + case marray32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNil reads a 'nil' MessagePack byte from the reader +func (m *Reader) ReadNil() error { + p, err := m.R.PeekByte() + if err != nil { + return err + } + if p != mnil { + return badPrefix(NilType, p) + } + _, err = m.R.Skip(1) + return err +} + +// ReadFloat64 reads a float64 from the reader. +// (If the value on the wire is encoded as a float32, +// it will be up-cast to a float64.) +func (m *Reader) ReadFloat64() (f float64, err error) { + var p []byte + p, err = m.R.Peek(9) + if err != nil { + // we'll allow a coversion from float32 to float64, + // since we don't lose any precision + if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + return + } + if p[0] != mfloat64 { + // see above + if p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + err = badPrefix(Float64Type, p[0]) + return + } + f = math.Float64frombits(getMuint64(p)) + _, err = m.R.Skip(9) + return +} + +// ReadFloat32 reads a float32 from the reader +func (m *Reader) ReadFloat32() (f float32, err error) { + var p []byte + p, err = m.R.Peek(5) + if err != nil { + return + } + if p[0] != mfloat32 { + err = badPrefix(Float32Type, p[0]) + return + } + f = math.Float32frombits(getMuint32(p)) + _, err = m.R.Skip(5) + return +} + +// ReadBool reads a bool from the reader +func (m *Reader) ReadBool() (b bool, err error) { + var p byte + p, err = m.R.PeekByte() + if err != nil { + return + } + switch p { + case mtrue: + b = true + case mfalse: + default: + err = badPrefix(BoolType, p) + return + } + _, err = m.R.Skip(1) + return +} + +// ReadDuration reads a time.Duration from the reader +func (m *Reader) ReadDuration() (d time.Duration, err error) { + i, err := m.ReadInt64() + return time.Duration(i), err +} + +// ReadInt64 reads an int64 from the reader +func (m *Reader) ReadInt64() (i int64, err error) { + var p []byte + lead, err := m.R.PeekByte() + if err != nil { + return + } + + if isfixint(lead) { + i = int64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } else if isnfixint(lead) { + i = int64(rnfixint(lead)) + _, err = m.R.Skip(1) + return + } + + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMint8(p)) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMint16(p)) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMint32(p)) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + i = getMint64(p) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u := getMuint64(p) + if u > math.MaxInt64 { + err = UintOverflow{Value: u, FailedBitsize: 64} + return + } + i = int64(u) + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32 reads an int32 from the reader +func (m *Reader) ReadInt32() (i int32, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt32 || in < math.MinInt32 { + err = IntOverflow{Value: in, FailedBitsize: 32} + return + } + i = int32(in) + return +} + +// ReadInt16 reads an int16 from the reader +func (m *Reader) ReadInt16() (i int16, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt16 || in < math.MinInt16 { + err = IntOverflow{Value: in, FailedBitsize: 16} + return + } + i = int16(in) + return +} + +// ReadInt8 reads an int8 from the reader +func (m *Reader) ReadInt8() (i int8, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt8 || in < math.MinInt8 { + err = IntOverflow{Value: in, FailedBitsize: 8} + return + } + i = int8(in) + return +} + +// ReadInt reads an int from the reader +func (m *Reader) ReadInt() (i int, err error) { + if smallint { + var in int32 + in, err = m.ReadInt32() + i = int(in) + return + } + var in int64 + in, err = m.ReadInt64() + i = int(in) + return +} + +// ReadUint64 reads a uint64 from the reader +func (m *Reader) ReadUint64() (u uint64, err error) { + var p []byte + lead, err := m.R.PeekByte() + if err != nil { + return + } + if isfixint(lead) { + u = uint64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + v := int64(getMint8(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + u = uint64(getMuint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + v := int64(getMint16(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + u = uint64(getMuint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + v := int64(getMint32(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + u = uint64(getMuint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + v := int64(getMint64(p)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u = getMuint64(p) + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + + } +} + +// ReadUint32 reads a uint32 from the reader +func (m *Reader) ReadUint32() (u uint32, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint32 { + err = UintOverflow{Value: in, FailedBitsize: 32} + return + } + u = uint32(in) + return +} + +// ReadUint16 reads a uint16 from the reader +func (m *Reader) ReadUint16() (u uint16, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint16 { + err = UintOverflow{Value: in, FailedBitsize: 16} + return + } + u = uint16(in) + return +} + +// ReadUint8 reads a uint8 from the reader +func (m *Reader) ReadUint8() (u uint8, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + u = uint8(in) + return +} + +// ReadUint reads a uint from the reader +func (m *Reader) ReadUint() (u uint, err error) { + if smallint { + var un uint32 + un, err = m.ReadUint32() + u = uint(un) + return + } + var un uint64 + un, err = m.ReadUint64() + u = uint(un) + return +} + +// ReadByte is analogous to ReadUint8. +// +// NOTE: this is *not* an implementation +// of io.ByteReader. +func (m *Reader) ReadByte() (b byte, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + b = byte(in) + return +} + +// ReadBytes reads a MessagePack 'bin' object +// from the reader and returns its value. It may +// use 'scratch' for storage if it is non-nil. +func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead = p[0] + var read int64 + switch lead { + case mbin8: + read = int64(p[1]) + m.R.Skip(2) + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(BinType, lead) + return + } + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadBytesHeader reads the size header +// of a MessagePack 'bin' object. The user +// is responsible for dealing with the next +// 'sz' bytes from the reader in an application-specific +// way. +func (m *Reader) ReadBytesHeader() (sz uint32, err error) { + var p []byte + lead, err := m.R.PeekByte() + if err != nil { + return + } + switch lead { + case mbin8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = uint32(big.Uint32(p[1:])) + return + default: + err = badPrefix(BinType, p[0]) + return + } +} + +// ReadExactBytes reads a MessagePack 'bin'-encoded +// object off of the wire into the provided slice. An +// ArrayError will be returned if the object is not +// exactly the length of the input slice. +func (m *Reader) ReadExactBytes(into []byte) error { + p, err := m.R.Peek(2) + if err != nil { + return err + } + lead := p[0] + var read int64 // bytes to read + var skip int // prefix size to skip + switch lead { + case mbin8: + read = int64(p[1]) + skip = 2 + case mbin16: + p, err = m.R.Peek(3) + if err != nil { + return err + } + read = int64(big.Uint16(p[1:])) + skip = 3 + case mbin32: + p, err = m.R.Peek(5) + if err != nil { + return err + } + read = int64(big.Uint32(p[1:])) + skip = 5 + default: + return badPrefix(BinType, lead) + } + if read != int64(len(into)) { + return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} + } + m.R.Skip(skip) + _, err = m.R.ReadFull(into) + return err +} + +// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string +// and returns its value as bytes. It may use 'scratch' for storage +// if it is non-nil. +func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { + var p []byte + lead, err := m.R.PeekByte() + if err != nil { + return + } + var read int64 + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadStringHeader reads a string header +// off of the wire. The user is then responsible +// for dealing with the next 'sz' bytes from +// the reader in an application-specific manner. +func (m *Reader) ReadStringHeader() (sz uint32, err error) { + lead, err := m.R.PeekByte() + if err != nil { + return + } + if isfixstr(lead) { + sz = uint32(rfixstr(lead)) + m.R.Skip(1) + return + } + var p []byte + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(StrType, lead) + return + } +} + +// ReadString reads a utf-8 string from the reader +func (m *Reader) ReadString() (s string, err error) { + var read int64 + lead, err := m.R.PeekByte() + if err != nil { + return + } + + var p []byte + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if read == 0 { + s, err = "", nil + return + } + // reading into the memory + // that will become the string + // itself has vastly superior + // worst-case performance, because + // the reader buffer doesn't have + // to be large enough to hold the string. + // the idea here is to make it more + // difficult for someone malicious + // to cause the system to run out of + // memory by sending very large strings. + // + // NOTE: this works because the argument + // passed to (*fwd.Reader).ReadFull escapes + // to the heap; its argument may, in turn, + // be passed to the underlying reader, and + // thus escape analysis *must* conclude that + // 'out' escapes. + out := make([]byte, read) + _, err = m.R.ReadFull(out) + if err != nil { + return + } + s = UnsafeString(out) + return +} + +// ReadComplex64 reads a complex64 from the reader +func (m *Reader) ReadComplex64() (f complex64, err error) { + var p []byte + p, err = m.R.Peek(10) + if err != nil { + return + } + if p[0] != mfixext8 { + err = badPrefix(Complex64Type, p[0]) + return + } + if int8(p[1]) != Complex64Extension { + err = errExt(int8(p[1]), Complex64Extension) + return + } + f = complex(math.Float32frombits(big.Uint32(p[2:])), + math.Float32frombits(big.Uint32(p[6:]))) + _, err = m.R.Skip(10) + return +} + +// ReadComplex128 reads a complex128 from the reader +func (m *Reader) ReadComplex128() (f complex128, err error) { + var p []byte + p, err = m.R.Peek(18) + if err != nil { + return + } + if p[0] != mfixext16 { + err = badPrefix(Complex128Type, p[0]) + return + } + if int8(p[1]) != Complex128Extension { + err = errExt(int8(p[1]), Complex128Extension) + return + } + f = complex(math.Float64frombits(big.Uint64(p[2:])), + math.Float64frombits(big.Uint64(p[10:]))) + _, err = m.R.Skip(18) + return +} + +// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. +// (You must pass a non-nil map into the function.) +func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { + var sz uint32 + sz, err = m.ReadMapHeader() + if err != nil { + return + } + for key := range mp { + delete(mp, key) + } + for i := uint32(0); i < sz; i++ { + var key string + var val interface{} + key, err = m.ReadString() + if err != nil { + return + } + val, err = m.ReadIntf() + if err != nil { + return + } + mp[key] = val + } + return +} + +// ReadTimeUTC reads a time.Time object from the reader. +// The returned time's location will be set to UTC. +func (m *Reader) ReadTimeUTC() (t time.Time, err error) { + t, err = m.ReadTime() + return t.UTC(), err +} + +// ReadTime reads a time.Time object from the reader. +// The returned time's location will be set to time.Local. +func (m *Reader) ReadTime() (t time.Time, err error) { + offset, length, extType, err := m.peekExtensionHeader() + if err != nil { + return t, err + } + + switch extType { + case TimeExtension: + var p []byte + p, err = m.R.Peek(15) + if err != nil { + return + } + if p[0] != mext8 || p[1] != 12 { + err = badPrefix(TimeType, p[0]) + return + } + if int8(p[2]) != TimeExtension { + err = errExt(int8(p[2]), TimeExtension) + return + } + sec, nsec := getUnix(p[3:]) + t = time.Unix(sec, int64(nsec)).Local() + _, err = m.R.Skip(15) + return + case MsgTimeExtension: + switch length { + case 4, 8, 12: + var tmp [12]byte + _, err = m.R.Skip(offset) + if err != nil { + return + } + var n int + n, err = m.R.Read(tmp[:length]) + if err != nil { + return + } + if n != length { + err = ErrShortBytes + return + } + b := tmp[:length] + switch length { + case 4: + t = time.Unix(int64(binary.BigEndian.Uint32(b)), 0).Local() + case 8: + v := binary.BigEndian.Uint64(b) + nanos := int64(v >> 34) + if nanos > 999999999 { + // In timestamp 64 and timestamp 96 formats, nanoseconds must not be larger than 999999999. + err = InvalidTimestamp{Nanos: nanos} + return + } + t = time.Unix(int64(v&(1<<34-1)), nanos).Local() + case 12: + nanos := int64(binary.BigEndian.Uint32(b)) + if nanos > 999999999 { + // In timestamp 64 and timestamp 96 formats, nanoseconds must not be larger than 999999999. + err = InvalidTimestamp{Nanos: nanos} + return + } + ux := int64(binary.BigEndian.Uint64(b[4:])) + t = time.Unix(ux, nanos).Local() + } + default: + err = InvalidTimestamp{FieldLength: length} + } + default: + err = errExt(extType, TimeExtension) + } + return +} + +// ReadJSONNumber reads an integer or a float value and return as json.Number +func (m *Reader) ReadJSONNumber() (n json.Number, err error) { + t, err := m.NextType() + if err != nil { + return + } + switch t { + case IntType: + v, err := m.ReadInt64() + if err == nil { + return json.Number(strconv.FormatInt(v, 10)), nil + } + return "", err + case UintType: + v, err := m.ReadUint64() + if err == nil { + return json.Number(strconv.FormatUint(v, 10)), nil + } + return "", err + case Float32Type, Float64Type: + v, err := m.ReadFloat64() + if err == nil { + return json.Number(strconv.FormatFloat(v, 'f', -1, 64)), nil + } + return "", err + } + return "", TypeError{Method: NumberType, Encoded: t} +} + +// ReadIntf reads out the next object as a raw interface{}/any. +// Arrays are decoded as []interface{}, and maps are decoded +// as map[string]interface{}. Integers are decoded as int64 +// and unsigned integers are decoded as uint64. +func (m *Reader) ReadIntf() (i interface{}, err error) { + var t Type + t, err = m.NextType() + if err != nil { + return + } + switch t { + case BoolType: + i, err = m.ReadBool() + return + + case IntType: + i, err = m.ReadInt64() + return + + case UintType: + i, err = m.ReadUint64() + return + + case BinType: + i, err = m.ReadBytes(nil) + return + + case StrType: + i, err = m.ReadString() + return + + case Complex64Type: + i, err = m.ReadComplex64() + return + + case Complex128Type: + i, err = m.ReadComplex128() + return + + case TimeType: + i, err = m.ReadTime() + return + + case DurationType: + i, err = m.ReadDuration() + return + + case ExtensionType: + var t int8 + t, err = m.peekExtensionType() + if err != nil { + return + } + f, ok := extensionReg[t] + if ok { + e := f() + err = m.ReadExtension(e) + i = e + return + } + var e RawExtension + e.Type = t + err = m.ReadExtension(&e) + i = &e + return + + case MapType: + // This can call back here, so treat as recursive call. + if done, err := m.recursiveCall(); err != nil { + return nil, err + } else { + defer done() + } + + mp := make(map[string]interface{}) + err = m.ReadMapStrIntf(mp) + i = mp + return + + case NilType: + err = m.ReadNil() + i = nil + return + + case Float32Type: + i, err = m.ReadFloat32() + return + + case Float64Type: + i, err = m.ReadFloat64() + return + + case ArrayType: + var sz uint32 + sz, err = m.ReadArrayHeader() + + if err != nil { + return + } + + if done, err := m.recursiveCall(); err != nil { + return nil, err + } else { + defer done() + } + + out := make([]interface{}, int(sz)) + for j := range out { + out[j], err = m.ReadIntf() + if err != nil { + return + } + } + i = out + return + + default: + return nil, fatal // unreachable + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go new file mode 100644 index 000000000000..8ed15a96887c --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go @@ -0,0 +1,1393 @@ +package msgp + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "math" + "strconv" + "time" +) + +var big = binary.BigEndian + +// NextType returns the type of the next +// object in the slice. If the length +// of the input is zero, it returns +// [InvalidType]. +func NextType(b []byte) Type { + if len(b) == 0 { + return InvalidType + } + spec := getBytespec(b[0]) + t := spec.typ + if t == ExtensionType && len(b) > int(spec.size) { + var tp int8 + if spec.extra == constsize { + tp = int8(b[1]) + } else { + tp = int8(b[spec.size-1]) + } + switch tp { + case TimeExtension, MsgTimeExtension: + return TimeType + case Complex128Extension: + return Complex128Type + case Complex64Extension: + return Complex64Type + default: + return ExtensionType + } + } + return t +} + +// IsNil returns true if len(b)>0 and +// the leading byte is a 'nil' MessagePack +// byte; false otherwise +func IsNil(b []byte) bool { + if len(b) != 0 && b[0] == mnil { + return true + } + return false +} + +// Raw is raw MessagePack. +// Raw allows you to read and write +// data without interpreting its contents. +type Raw []byte + +// MarshalMsg implements [Marshaler]. +// It appends the raw contents of 'raw' +// to the provided byte slice. If 'raw' +// is 0 bytes, 'nil' will be appended instead. +func (r Raw) MarshalMsg(b []byte) ([]byte, error) { + i := len(r) + if i == 0 { + return AppendNil(b), nil + } + o, l := ensure(b, i) + copy(o[l:], []byte(r)) + return o, nil +} + +// UnmarshalMsg implements [Unmarshaler]. +// It sets the contents of *Raw to be the next +// object in the provided byte slice. +func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { + l := len(b) + out, err := Skip(b) + if err != nil { + return b, err + } + rlen := l - len(out) + if IsNil(b[:rlen]) { + rlen = 0 + } + if cap(*r) < rlen { + *r = make(Raw, rlen) + } else { + *r = (*r)[0:rlen] + } + copy(*r, b[:rlen]) + return out, nil +} + +// EncodeMsg implements [Encodable]. +// It writes the raw bytes to the writer. +// If r is empty, it writes 'nil' instead. +func (r Raw) EncodeMsg(w *Writer) error { + if len(r) == 0 { + return w.WriteNil() + } + _, err := w.Write([]byte(r)) + return err +} + +// DecodeMsg implements [Decodable]. +// It sets the value of *Raw to be the +// next object on the wire. +func (r *Raw) DecodeMsg(f *Reader) error { + *r = (*r)[:0] + err := appendNext(f, (*[]byte)(r)) + if IsNil(*r) { + *r = (*r)[:0] + } + return err +} + +// Msgsize implements [Sizer]. +func (r Raw) Msgsize() int { + l := len(r) + if l == 0 { + return 1 // for 'nil' + } + return l +} + +func appendNext(f *Reader, d *[]byte) error { + amt, o, err := getNextSize(f.R) + if err != nil { + return err + } + var i int + *d, i = ensure(*d, int(amt)) + _, err = f.R.ReadFull((*d)[i:]) + if err != nil { + return err + } + for o > 0 { + err = appendNext(f, d) + if err != nil { + return err + } + o-- + } + return nil +} + +// MarshalJSON implements [json.Marshaler]. +func (r *Raw) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + _, err := UnmarshalAsJSON(&buf, []byte(*r)) + return buf.Bytes(), err +} + +// ReadMapHeaderBytes reads a map header size +// from 'b' and returns the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a map) +func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + b = b[1:] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + o = b + return + } + + switch lead { + case mmap16: + if len(b) < 2 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b)) + o = b[2:] + return + + case mmap32: + if len(b) < 4 { + err = ErrShortBytes + return + } + sz = big.Uint32(b) + o = b[4:] + return + + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKeyZC attempts to read a map key +// from 'b' and returns the key bytes and the remaining bytes +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a str or bin) +func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { + o, x, err := ReadStringZC(b) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return ReadBytesZC(b) + } + return nil, b, err + } + return o, x, nil +} + +// ReadArrayHeaderBytes attempts to read +// the array header size off of 'b' and return +// the size and remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not an array) +func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + lead := b[0] + b = b[1:] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + o = b + return + } + + switch lead { + case marray16: + if len(b) < 2 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b)) + o = b[2:] + return + + case marray32: + if len(b) < 4 { + err = ErrShortBytes + return + } + sz = big.Uint32(b) + o = b[4:] + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadBytesHeader reads the 'bin' header size +// off of 'b' and returns the size and remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a bin object) +func ReadBytesHeader(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + switch b[0] { + case mbin8: + if len(b) < 2 { + err = ErrShortBytes + return + } + sz = uint32(b[1]) + o = b[2:] + return + case mbin16: + if len(b) < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + case mbin32: + if len(b) < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + default: + err = badPrefix(BinType, b[0]) + return + } +} + +// ReadNilBytes tries to read a "nil" byte +// off of 'b' and return the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a 'nil') +// - [InvalidPrefixError] +func ReadNilBytes(b []byte) ([]byte, error) { + if len(b) < 1 { + return nil, ErrShortBytes + } + if b[0] != mnil { + return b, badPrefix(NilType, b[0]) + } + return b[1:], nil +} + +// ReadFloat64Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a float64) +func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) { + if len(b) < 9 { + if len(b) >= 5 && b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = ErrShortBytes + return + } + + if b[0] != mfloat64 { + if b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = badPrefix(Float64Type, b[0]) + return + } + + f = math.Float64frombits(getMuint64(b)) + o = b[9:] + return +} + +// ReadFloat32Bytes tries to read a float32 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a float32) +func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) { + if len(b) < 5 { + err = ErrShortBytes + return + } + + if b[0] != mfloat32 { + err = TypeError{Method: Float32Type, Encoded: getType(b[0])} + return + } + + f = math.Float32frombits(getMuint32(b)) + o = b[5:] + return +} + +// ReadBoolBytes tries to read a bool +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a bool) +func ReadBoolBytes(b []byte) (bool, []byte, error) { + if len(b) < 1 { + return false, b, ErrShortBytes + } + switch b[0] { + case mtrue: + return true, b[1:], nil + case mfalse: + return false, b[1:], nil + default: + return false, b, badPrefix(BoolType, b[0]) + } +} + +// ReadDurationBytes tries to read a time.Duration +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - TypeError (not a int) +func ReadDurationBytes(b []byte) (d time.Duration, o []byte, err error) { + i, o, err := ReadInt64Bytes(b) + return time.Duration(i), o, err +} + +// ReadInt64Bytes tries to read an int64 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a int) +func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + b = b[1:] + if isfixint(lead) { + i = int64(rfixint(lead)) + o = b + return + } + if isnfixint(lead) { + i = int64(rnfixint(lead)) + o = b + return + } + + switch lead { + case mint8: + if len(b) < 1 { + err = ErrShortBytes + return + } + i = int64(int8(b[0])) + o = b[1:] + return + + case muint8: + if len(b) < 1 { + err = ErrShortBytes + return + } + i = int64(b[0]) + o = b[1:] + return + + case mint16: + if len(b) < 2 { + err = ErrShortBytes + return + } + i = int64(int16(big.Uint16(b))) + o = b[2:] + return + + case muint16: + if len(b) < 2 { + err = ErrShortBytes + return + } + i = int64(big.Uint16(b)) + o = b[2:] + return + + case mint32: + if len(b) < 4 { + err = ErrShortBytes + return + } + i = int64(int32(big.Uint32(b))) + o = b[4:] + return + + case muint32: + if len(b) < 4 { + err = ErrShortBytes + return + } + i = int64(big.Uint32(b)) + o = b[4:] + return + + case mint64: + if len(b) < 8 { + err = ErrShortBytes + return + } + i = int64(big.Uint64(b)) + o = b[8:] + return + + case muint64: + if len(b) < 8 { + err = ErrShortBytes + return + } + u := big.Uint64(b) + if u > math.MaxInt64 { + err = UintOverflow{Value: u, FailedBitsize: 64} + return + } + i = int64(u) + o = b[8:] + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32Bytes tries to read an int32 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a int) +// - [IntOverflow] (value doesn't fit in int32) +func ReadInt32Bytes(b []byte) (int32, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt32 || i < math.MinInt32 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 32} + } + return int32(i), o, err +} + +// ReadInt16Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a int) +// - [IntOverflow] (value doesn't fit in int16) +func ReadInt16Bytes(b []byte) (int16, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt16 || i < math.MinInt16 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 16} + } + return int16(i), o, err +} + +// ReadInt8Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a int) +// - [IntOverflow] (value doesn't fit in int8) +func ReadInt8Bytes(b []byte) (int8, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt8 || i < math.MinInt8 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 8} + } + return int8(i), o, err +} + +// ReadIntBytes tries to read an int +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a int) +// - [IntOverflow] (value doesn't fit in int; 32-bit platforms only) +func ReadIntBytes(b []byte) (int, []byte, error) { + if smallint { + i, b, err := ReadInt32Bytes(b) + return int(i), b, err + } + i, b, err := ReadInt64Bytes(b) + return int(i), b, err +} + +// ReadUint64Bytes tries to read a uint64 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a uint) +func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + b = b[1:] + if isfixint(lead) { + u = uint64(rfixint(lead)) + o = b + return + } + + switch lead { + case mint8: + if len(b) < 1 { + err = ErrShortBytes + return + } + v := int64(int8(b[0])) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[1:] + return + + case muint8: + if len(b) < 1 { + err = ErrShortBytes + return + } + u = uint64(b[0]) + o = b[1:] + return + + case mint16: + if len(b) < 2 { + err = ErrShortBytes + return + } + v := int64((int16(b[0]) << 8) | int16(b[1])) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[2:] + return + + case muint16: + if len(b) < 2 { + err = ErrShortBytes + return + } + u = uint64(big.Uint16(b)) + o = b[2:] + return + + case mint32: + if len(b) < 4 { + err = ErrShortBytes + return + } + v := int64(int32(big.Uint32(b))) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[4:] + return + + case muint32: + if len(b) < 4 { + err = ErrShortBytes + return + } + u = uint64(big.Uint32(b)) + o = b[4:] + return + + case mint64: + if len(b) < 8 { + err = ErrShortBytes + return + } + v := int64(big.Uint64(b)) + if v < 0 { + err = UintBelowZero{Value: v} + return + } + u = uint64(v) + o = b[8:] + return + + case muint64: + if len(b) < 8 { + err = ErrShortBytes + return + } + u = big.Uint64(b) + o = b[8:] + return + + default: + if isnfixint(lead) { + err = UintBelowZero{Value: int64(rnfixint(lead))} + } else { + err = badPrefix(UintType, lead) + } + return + } +} + +// ReadUint32Bytes tries to read a uint32 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a uint) +// - [UintOverflow] (value too large for uint32) +func ReadUint32Bytes(b []byte) (uint32, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint32 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} + } + return uint32(v), o, err +} + +// ReadUint16Bytes tries to read a uint16 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a uint) +// - [UintOverflow] (value too large for uint16) +func ReadUint16Bytes(b []byte) (uint16, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint16 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} + } + return uint16(v), o, err +} + +// ReadUint8Bytes tries to read a uint8 +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a uint) +// - [UintOverflow] (value too large for uint8) +func ReadUint8Bytes(b []byte) (uint8, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint8 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} + } + return uint8(v), o, err +} + +// ReadUintBytes tries to read a uint +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a uint) +// - [UintOverflow] (value too large for uint; 32-bit platforms only) +func ReadUintBytes(b []byte) (uint, []byte, error) { + if smallint { + u, b, err := ReadUint32Bytes(b) + return uint(u), b, err + } + u, b, err := ReadUint64Bytes(b) + return uint(u), b, err +} + +// ReadByteBytes is analogous to ReadUint8Bytes +func ReadByteBytes(b []byte) (byte, []byte, error) { + return ReadUint8Bytes(b) +} + +// ReadBytesBytes reads a 'bin' object +// from 'b' and returns its vaue and +// the remaining bytes in 'b'. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - [TypeError] (not a 'bin' object) +func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, scratch, false) +} + +func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + b = b[1:] + var read int + switch lead { + case mbin8: + if len(b) < 1 { + err = ErrShortBytes + return + } + + read = int(b[0]) + b = b[1:] + + case mbin16: + if len(b) < 2 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b)) + b = b[2:] + + case mbin32: + if len(b) < 4 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b)) + b = b[4:] + + default: + err = badPrefix(BinType, lead) + return + } + + if len(b) < read { + err = ErrShortBytes + return + } + + // zero-copy + if zc { + v = b[0:read] + o = b[read:] + return + } + + if cap(scratch) >= read { + v = scratch[0:read] + } else { + v = make([]byte, read) + } + + o = b[copy(v, b):] + return +} + +// ReadBytesZC extracts the messagepack-encoded +// binary field without copying. The returned []byte +// points to the same memory as the input slice. +// +// Possible errors: +// +// - [ErrShortBytes] (b not long enough) +// - [TypeError] (object not 'bin') +func ReadBytesZC(b []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, nil, true) +} + +func ReadExactBytes(b []byte, into []byte) (o []byte, err error) { + if len(b) < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + var read uint32 + b = b[1:] + switch lead { + case mbin8: + if len(b) < 1 { + err = ErrShortBytes + return + } + + read = uint32(b[0]) + b = b[1:] + + case mbin16: + if len(b) < 2 { + err = ErrShortBytes + return + } + read = uint32(big.Uint16(b)) + b = b[2:] + + case mbin32: + if len(b) < 4 { + err = ErrShortBytes + return + } + read = big.Uint32(b) + b = b[4:] + + default: + err = badPrefix(BinType, lead) + return + } + + if read != uint32(len(into)) { + err = ArrayError{Wanted: uint32(len(into)), Got: read} + return + } + + o = b[copy(into, b):] + return +} + +// ReadStringZC reads a messagepack string field +// without copying. The returned []byte points +// to the same memory as the input slice. +// +// Possible errors: +// +// - [ErrShortBytes] (b not long enough) +// - [TypeError] (object not 'str') +func ReadStringZC(b []byte) (v []byte, o []byte, err error) { + if len(b) < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + + b = b[1:] + if isfixstr(lead) { + read = int(rfixstr(lead)) + } else { + switch lead { + case mstr8: + if len(b) < 1 { + err = ErrShortBytes + return + } + read = int(b[0]) + b = b[1:] + + case mstr16: + if len(b) < 2 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b)) + b = b[2:] + + case mstr32: + if len(b) < 4 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b)) + b = b[4:] + + default: + err = TypeError{Method: StrType, Encoded: getType(lead)} + return + } + } + + if len(b) < read { + err = ErrShortBytes + return + } + + v = b[0:read] + o = b[read:] + return +} + +// ReadStringBytes reads a 'str' object +// from 'b' and returns its value and the +// remaining bytes in 'b'. +// +// Possible errors: +// +// - [ErrShortBytes] (b not long enough) +// - [TypeError] (not 'str' type) +// - [InvalidPrefixError] +func ReadStringBytes(b []byte) (string, []byte, error) { + v, o, err := ReadStringZC(b) + return string(v), o, err +} + +// ReadStringAsBytes reads a 'str' object +// into a slice of bytes. 'v' is the value of +// the 'str' object, which may reside in memory +// pointed to by 'scratch.' 'o' is the remaining bytes +// in 'b'. +// +// Possible errors: +// +// - [ErrShortBytes] (b not long enough) +// - [TypeError] (not 'str' type) +// - [InvalidPrefixError] (unknown type marker) +func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + var tmp []byte + tmp, o, err = ReadStringZC(b) + v = append(scratch[:0], tmp...) + return +} + +// ReadComplex128Bytes reads a complex128 +// extension object from 'b' and returns the +// remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (not enough bytes in 'b') +// - [TypeError] (object not a complex128) +// - [InvalidPrefixError] +// - [ExtensionTypeError] (object an extension of the correct size, but not a complex128) +func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { + if len(b) < 18 { + err = ErrShortBytes + return + } + if b[0] != mfixext16 { + err = badPrefix(Complex128Type, b[0]) + return + } + if int8(b[1]) != Complex128Extension { + err = errExt(int8(b[1]), Complex128Extension) + return + } + c = complex(math.Float64frombits(big.Uint64(b[2:])), + math.Float64frombits(big.Uint64(b[10:]))) + o = b[18:] + return +} + +// ReadComplex64Bytes reads a complex64 +// extension object from 'b' and returns the +// remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (not enough bytes in 'b') +// - [TypeError] (object not a complex64) +// - [ExtensionTypeError] (object an extension of the correct size, but not a complex64) +func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { + if len(b) < 10 { + err = ErrShortBytes + return + } + if b[0] != mfixext8 { + err = badPrefix(Complex64Type, b[0]) + return + } + if b[1] != Complex64Extension { + err = errExt(int8(b[1]), Complex64Extension) + return + } + c = complex(math.Float32frombits(big.Uint32(b[2:])), + math.Float32frombits(big.Uint32(b[6:]))) + o = b[10:] + return +} + +// ReadTimeUTCBytes does the same as ReadTimeBytes, but returns the value as UTC. +func ReadTimeUTCBytes(b []byte) (t time.Time, o []byte, err error) { + t, o, err = ReadTimeBytes(b) + return t.UTC(), o, err +} + +// ReadTimeBytes reads a time.Time +// extension object from 'b' and returns the +// remaining bytes. +// Both the official and the format in this package will be read. +// +// Possible errors: +// +// - [ErrShortBytes] (not enough bytes in 'b') +// - [TypeError] (object not a time extension 5 or -1) +// - [ExtensionTypeError] (object an extension of the correct size, but not a time.Time) +func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { + if len(b) < 6 { + err = ErrShortBytes + return + } + typ, o, b, err := readExt(b) + if err != nil { + return + } + switch typ { + case TimeExtension: + if len(b) != 12 { + err = ErrShortBytes + return + } + sec, nsec := getUnix(b) + t = time.Unix(sec, int64(nsec)).Local() + return + case MsgTimeExtension: + switch len(b) { + case 4: + t = time.Unix(int64(binary.BigEndian.Uint32(b)), 0).Local() + return + case 8: + v := binary.BigEndian.Uint64(b) + nanos := int64(v >> 34) + if nanos > 999999999 { + // In timestamp 64 and timestamp 96 formats, nanoseconds must not be larger than 999999999. + err = InvalidTimestamp{Nanos: nanos} + return + } + t = time.Unix(int64(v&(1<<34-1)), nanos).Local() + return + case 12: + nanos := int64(binary.BigEndian.Uint32(b)) + if nanos > 999999999 { + // In timestamp 64 and timestamp 96 formats, nanoseconds must not be larger than 999999999. + err = InvalidTimestamp{Nanos: nanos} + return + } + ux := int64(binary.BigEndian.Uint64(b[4:])) + t = time.Unix(ux, nanos).Local() + return + default: + err = InvalidTimestamp{FieldLength: len(b)} + return + } + default: + err = errExt(int8(b[2]), TimeExtension) + return + } +} + +// ReadMapStrIntfBytes reads a map[string]interface{} +// out of 'b' and returns the map and remaining bytes. +// If 'old' is non-nil, the values will be read into that map. +func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { + return readMapStrIntfBytesDepth(b, old, 0) +} + +func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (v map[string]interface{}, o []byte, err error) { + if depth >= recursionLimit { + err = ErrRecursion + return + } + + var sz uint32 + o = b + sz, o, err = ReadMapHeaderBytes(o) + + if err != nil { + return + } + + if old != nil { + for key := range old { + delete(old, key) + } + v = old + } else { + v = make(map[string]interface{}, int(sz)) + } + + for z := uint32(0); z < sz; z++ { + if len(o) < 1 { + err = ErrShortBytes + return + } + var key []byte + key, o, err = ReadMapKeyZC(o) + if err != nil { + return + } + var val interface{} + val, o, err = readIntfBytesDepth(o, depth) + if err != nil { + return + } + v[string(key)] = val + } + return +} + +// ReadIntfBytes attempts to read +// the next object out of 'b' as a raw interface{} and +// return the remaining bytes. +func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { + return readIntfBytesDepth(b, 0) +} + +func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error) { + if depth >= recursionLimit { + err = ErrRecursion + return + } + if len(b) < 1 { + err = ErrShortBytes + return + } + + k := NextType(b) + + switch k { + case MapType: + i, o, err = readMapStrIntfBytesDepth(b, nil, depth+1) + return + + case ArrayType: + var sz uint32 + sz, o, err = ReadArrayHeaderBytes(b) + if err != nil { + return + } + j := make([]interface{}, int(sz)) + i = j + for d := range j { + j[d], o, err = readIntfBytesDepth(o, depth+1) + if err != nil { + return + } + } + return + + case Float32Type: + i, o, err = ReadFloat32Bytes(b) + return + + case Float64Type: + i, o, err = ReadFloat64Bytes(b) + return + + case IntType: + i, o, err = ReadInt64Bytes(b) + return + + case UintType: + i, o, err = ReadUint64Bytes(b) + return + + case BoolType: + i, o, err = ReadBoolBytes(b) + return + + case TimeType: + i, o, err = ReadTimeBytes(b) + return + + case Complex64Type: + i, o, err = ReadComplex64Bytes(b) + return + + case Complex128Type: + i, o, err = ReadComplex128Bytes(b) + return + + case ExtensionType: + var t int8 + t, err = peekExtension(b) + if err != nil { + return + } + // use a user-defined extension, + // if it's been registered + f, ok := extensionReg[t] + if ok { + e := f() + o, err = ReadExtensionBytes(b, e) + i = e + return + } + // last resort is a raw extension + e := RawExtension{} + e.Type = int8(t) + o, err = ReadExtensionBytes(b, &e) + i = &e + return + + case NilType: + o, err = ReadNilBytes(b) + return + + case BinType: + i, o, err = ReadBytesBytes(b, nil) + return + + case StrType: + i, o, err = ReadStringBytes(b) + return + + default: + err = InvalidPrefixError(b[0]) + return + } +} + +// Skip skips the next object in 'b' and +// returns the remaining bytes. If the object +// is a map or array, all of its elements +// will be skipped. +// +// Possible errors: +// +// - [ErrShortBytes] (not enough bytes in b) +// - [InvalidPrefixError] (bad encoding) +// - [ErrRecursion] (too deeply nested data) +func Skip(b []byte) ([]byte, error) { + return skipDepth(b, 0) +} + +func skipDepth(b []byte, depth int) ([]byte, error) { + if depth >= recursionLimit { + return b, ErrRecursion + } + sz, asz, err := getSize(b) + if err != nil { + return b, err + } + if uintptr(len(b)) < sz { + return b, ErrShortBytes + } + b = b[sz:] + for asz > 0 { + b, err = skipDepth(b, depth+1) + if err != nil { + return b, err + } + asz-- + } + return b, nil +} + +// returns (skip N bytes, skip M objects, error) +func getSize(b []byte) (uintptr, uintptr, error) { + l := len(b) + if l == 0 { + return 0, 0, ErrShortBytes + } + lead := b[0] + spec := getBytespec(lead) // get type information + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { // fixed composites + return uintptr(size), uintptr(mode), nil + } + if l < int(size) { + return 0, 0, ErrShortBytes + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} + +// ReadJSONNumberBytes tries to read a number +// from 'b' and return the value and the remaining bytes. +// +// Possible errors: +// +// - [ErrShortBytes] (too few bytes) +// - TypeError (not a number (int/float)) +func ReadJSONNumberBytes(b []byte) (number json.Number, o []byte, err error) { + if len(b) < 1 { + return "", nil, ErrShortBytes + } + if i, o, err := ReadInt64Bytes(b); err == nil { + return json.Number(strconv.FormatInt(i, 10)), o, nil + } + f, o, err := ReadFloat64Bytes(b) + if err == nil { + return json.Number(strconv.FormatFloat(f, 'f', -1, 64)), o, nil + } + return "", nil, TypeError{Method: NumberType, Encoded: getType(b[0])} +} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go new file mode 100644 index 000000000000..585a67fdb5ca --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/size.go @@ -0,0 +1,40 @@ +package msgp + +// The sizes provided +// are the worst-case +// encoded sizes for +// each type. For variable- +// length types ([]byte, string), +// the total encoded size is +// the prefix size plus the +// length of the object. +const ( + Int64Size = 9 + IntSize = Int64Size + UintSize = Int64Size + Int8Size = 2 + Int16Size = 3 + Int32Size = 5 + Uint8Size = 2 + ByteSize = Uint8Size + Uint16Size = 3 + Uint32Size = 5 + Uint64Size = Int64Size + Float64Size = 9 + Float32Size = 5 + Complex64Size = 10 + Complex128Size = 18 + + DurationSize = Int64Size + TimeSize = 15 + BoolSize = 1 + NilSize = 1 + JSONNumberSize = Int64Size // Same as Float64Size + + MapHeaderSize = 5 + ArrayHeaderSize = 5 + + BytesPrefixSize = 5 + StringPrefixSize = 5 + ExtensionPrefixSize = 6 +) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go new file mode 100644 index 000000000000..7d36bfb1e3fb --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go @@ -0,0 +1,37 @@ +//go:build (!purego && !appengine) || (!appengine && purego && unsafe) +// +build !purego,!appengine !appengine,purego,unsafe + +package msgp + +import ( + "unsafe" +) + +// NOTE: +// all of the definition in this file +// should be repeated in appengine.go, +// but without using unsafe + +const ( + // spec says int and uint are always + // the same size, but that int/uint + // size may not be machine word size + smallint = unsafe.Sizeof(int(0)) == 4 +) + +// UnsafeString returns the byte slice as a volatile string +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. +func UnsafeString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// UnsafeBytes returns the string as a byte slice +// +// Deprecated: +// Since this code is no longer used by the code generator, +// UnsafeBytes(s) is precisely equivalent to []byte(s) +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go new file mode 100644 index 000000000000..352350f90422 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write.go @@ -0,0 +1,886 @@ +package msgp + +import ( + "encoding/binary" + "encoding/json" + "errors" + "io" + "math" + "reflect" + "sync" + "time" +) + +const ( + // min buffer size for the writer + minWriterSize = 18 +) + +// Sizer is an interface implemented +// by types that can estimate their +// size when MessagePack encoded. +// This interface is optional, but +// encoding/marshaling implementations +// may use this as a way to pre-allocate +// memory for serialization. +type Sizer interface { + Msgsize() int +} + +var ( + // Nowhere is an io.Writer to nowhere + Nowhere io.Writer = nwhere{} + + btsType = reflect.TypeOf(([]byte)(nil)) + writerPool = sync.Pool{ + New: func() interface{} { + return &Writer{buf: make([]byte, 2048)} + }, + } +) + +func popWriter(w io.Writer) *Writer { + wr := writerPool.Get().(*Writer) + wr.Reset(w) + return wr +} + +func pushWriter(wr *Writer) { + wr.w = nil + wr.wloc = 0 + writerPool.Put(wr) +} + +// freeW frees a writer for use +// by other processes. It is not necessary +// to call freeW on a writer. However, maintaining +// a reference to a *Writer after calling freeW on +// it will cause undefined behavior. +func freeW(w *Writer) { pushWriter(w) } + +// Require ensures that cap(old)-len(old) >= extra. +func Require(old []byte, extra int) []byte { + l := len(old) + c := cap(old) + r := l + extra + if c >= r { + return old + } else if l == 0 { + return make([]byte, 0, extra) + } + // the new size is the greater + // of double the old capacity + // and the sum of the old length + // and the number of new bytes + // necessary. + c <<= 1 + if c < r { + c = r + } + n := make([]byte, l, c) + copy(n, old) + return n +} + +// nowhere writer +type nwhere struct{} + +func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } + +// Marshaler is the interface implemented +// by types that know how to marshal themselves +// as MessagePack. MarshalMsg appends the marshalled +// form of the object to the provided +// byte slice, returning the extended +// slice and any errors encountered. +type Marshaler interface { + MarshalMsg([]byte) ([]byte, error) +} + +// Encodable is the interface implemented +// by types that know how to write themselves +// as MessagePack using a *msgp.Writer. +type Encodable interface { + EncodeMsg(*Writer) error +} + +// Writer is a buffered writer +// that can be used to write +// MessagePack objects to an io.Writer. +// You must call *Writer.Flush() in order +// to flush all of the buffered data +// to the underlying writer. +type Writer struct { + w io.Writer + buf []byte + wloc int +} + +// NewWriter returns a new *Writer. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return popWriter(w) +} + +// NewWriterSize returns a writer with a custom buffer size. +func NewWriterSize(w io.Writer, sz int) *Writer { + // we must be able to require() 'minWriterSize' + // contiguous bytes, so that is the + // practical minimum buffer size + if sz < minWriterSize { + sz = minWriterSize + } + buf := make([]byte, sz) + return NewWriterBuf(w, buf) +} + +// NewWriterBuf returns a writer with a provided buffer. +// 'buf' is not used when the capacity is smaller than 18, +// custom buffer is allocated instead. +func NewWriterBuf(w io.Writer, buf []byte) *Writer { + if cap(buf) < minWriterSize { + buf = make([]byte, minWriterSize) + } + buf = buf[:cap(buf)] + return &Writer{ + w: w, + buf: buf, + } +} + +// Encode encodes an Encodable to an io.Writer. +func Encode(w io.Writer, e Encodable) error { + wr := NewWriter(w) + err := e.EncodeMsg(wr) + if err == nil { + err = wr.Flush() + } + freeW(wr) + return err +} + +func (mw *Writer) flush() error { + if mw.wloc == 0 { + return nil + } + n, err := mw.w.Write(mw.buf[:mw.wloc]) + if err != nil { + if n > 0 { + mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) + } + return err + } + mw.wloc = 0 + return nil +} + +// Flush flushes all of the buffered +// data to the underlying writer. +func (mw *Writer) Flush() error { return mw.flush() } + +// Buffered returns the number bytes in the write buffer +func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) bufsize() int { return len(mw.buf) } + +// NOTE: this should only be called with +// a number that is guaranteed to be less than +// len(mw.buf). typically, it is called with a constant. +// +// NOTE: this is a hot code path +func (mw *Writer) require(n int) (int, error) { + c := len(mw.buf) + wl := mw.wloc + if c-wl < n { + if err := mw.flush(); err != nil { + return 0, err + } + wl = mw.wloc + } + mw.wloc += n + return wl, nil +} + +func (mw *Writer) Append(b ...byte) error { + if mw.avail() < len(b) { + err := mw.flush() + if err != nil { + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], b) + return nil +} + +// push one byte onto the buffer +// +// NOTE: this is a hot code path +func (mw *Writer) push(b byte) error { + if mw.wloc == len(mw.buf) { + if err := mw.flush(); err != nil { + return err + } + } + mw.buf[mw.wloc] = b + mw.wloc++ + return nil +} + +func (mw *Writer) prefix8(b byte, u uint8) error { + const need = 2 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu8(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix16(b byte, u uint16) error { + const need = 3 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu16(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix32(b byte, u uint32) error { + const need = 5 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu32(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix64(b byte, u uint64) error { + const need = 9 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu64(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +// Write implements io.Writer, and writes +// data directly to the buffer. +func (mw *Writer) Write(p []byte) (int, error) { + l := len(p) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return 0, err + } + if l > len(mw.buf) { + return mw.w.Write(p) + } + } + mw.wloc += copy(mw.buf[mw.wloc:], p) + return l, nil +} + +// implements io.WriteString +func (mw *Writer) writeString(s string) error { + l := len(s) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return err + } + if l > len(mw.buf) { + _, err := io.WriteString(mw.w, s) + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], s) + return nil +} + +// Reset changes the underlying writer used by the Writer +func (mw *Writer) Reset(w io.Writer) { + mw.buf = mw.buf[:cap(mw.buf)] + mw.w = w + mw.wloc = 0 +} + +// WriteMapHeader writes a map header of the given +// size to the writer +func (mw *Writer) WriteMapHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixmap(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(mmap16, uint16(sz)) + default: + return mw.prefix32(mmap32, sz) + } +} + +// WriteArrayHeader writes an array header of the +// given size to the writer +func (mw *Writer) WriteArrayHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixarray(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(marray16, uint16(sz)) + default: + return mw.prefix32(marray32, sz) + } +} + +// WriteNil writes a nil byte to the buffer +func (mw *Writer) WriteNil() error { + return mw.push(mnil) +} + +// WriteFloat writes a float to the writer as either float64 +// or float32 when it represents the exact same value +func (mw *Writer) WriteFloat(f float64) error { + f32 := float32(f) + if float64(f32) == f { + return mw.prefix32(mfloat32, math.Float32bits(f32)) + } + return mw.prefix64(mfloat64, math.Float64bits(f)) +} + +// WriteFloat64 writes a float64 to the writer +func (mw *Writer) WriteFloat64(f float64) error { + return mw.prefix64(mfloat64, math.Float64bits(f)) +} + +// WriteFloat32 writes a float32 to the writer +func (mw *Writer) WriteFloat32(f float32) error { + return mw.prefix32(mfloat32, math.Float32bits(f)) +} + +// WriteDuration writes a time.Duration to the writer +func (mw *Writer) WriteDuration(d time.Duration) error { + return mw.WriteInt64(int64(d)) +} + +// WriteInt64 writes an int64 to the writer +func (mw *Writer) WriteInt64(i int64) error { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return mw.push(wfixint(uint8(i))) + case i <= math.MaxInt16: + return mw.prefix16(mint16, uint16(i)) + case i <= math.MaxInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } + } + switch { + case i >= -32: + return mw.push(wnfixint(int8(i))) + case i >= math.MinInt8: + return mw.prefix8(mint8, uint8(i)) + case i >= math.MinInt16: + return mw.prefix16(mint16, uint16(i)) + case i >= math.MinInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } +} + +// WriteInt8 writes an int8 to the writer +func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } + +// WriteInt16 writes an int16 to the writer +func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } + +// WriteInt32 writes an int32 to the writer +func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } + +// WriteInt writes an int to the writer +func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } + +// WriteUint64 writes a uint64 to the writer +func (mw *Writer) WriteUint64(u uint64) error { + switch { + case u <= (1<<7)-1: + return mw.push(wfixint(uint8(u))) + case u <= math.MaxUint8: + return mw.prefix8(muint8, uint8(u)) + case u <= math.MaxUint16: + return mw.prefix16(muint16, uint16(u)) + case u <= math.MaxUint32: + return mw.prefix32(muint32, uint32(u)) + default: + return mw.prefix64(muint64, u) + } +} + +// WriteByte is analogous to WriteUint8 +func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } + +// WriteUint8 writes a uint8 to the writer +func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint16 writes a uint16 to the writer +func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint32 writes a uint32 to the writer +func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint writes a uint to the writer +func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } + +// WriteBytes writes binary as 'bin' to the writer +func (mw *Writer) WriteBytes(b []byte) error { + sz := uint32(len(b)) + var err error + switch { + case sz <= math.MaxUint8: + err = mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mbin16, uint16(sz)) + default: + err = mw.prefix32(mbin32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(b) + return err +} + +// WriteBytesHeader writes just the size header +// of a MessagePack 'bin' object. The user is responsible +// for then writing 'sz' more bytes into the stream. +func (mw *Writer) WriteBytesHeader(sz uint32) error { + switch { + case sz <= math.MaxUint8: + return mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mbin16, uint16(sz)) + default: + return mw.prefix32(mbin32, sz) + } +} + +// WriteBool writes a bool to the writer +func (mw *Writer) WriteBool(b bool) error { + if b { + return mw.push(mtrue) + } + return mw.push(mfalse) +} + +// WriteString writes a messagepack string to the writer. +// (This is NOT an implementation of io.StringWriter) +func (mw *Writer) WriteString(s string) error { + sz := uint32(len(s)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + return mw.writeString(s) +} + +// WriteStringHeader writes just the string size +// header of a MessagePack 'str' object. The user +// is responsible for writing 'sz' more valid UTF-8 +// bytes to the stream. +func (mw *Writer) WriteStringHeader(sz uint32) error { + switch { + case sz <= 31: + return mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + return mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mstr16, uint16(sz)) + default: + return mw.prefix32(mstr32, sz) + } +} + +// WriteStringFromBytes writes a 'str' object +// from a []byte. +func (mw *Writer) WriteStringFromBytes(str []byte) error { + sz := uint32(len(str)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(str) + return err +} + +// WriteComplex64 writes a complex64 to the writer +func (mw *Writer) WriteComplex64(f complex64) error { + o, err := mw.require(10) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = Complex64Extension + big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) + big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) + return nil +} + +// WriteComplex128 writes a complex128 to the writer +func (mw *Writer) WriteComplex128(f complex128) error { + o, err := mw.require(18) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = Complex128Extension + big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) + big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) + return nil +} + +// WriteMapStrStr writes a map[string]string to the writer +func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteString(val) + if err != nil { + return + } + } + return nil +} + +// WriteMapStrIntf writes a map[string]interface to the writer +func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteIntf(val) + if err != nil { + return + } + } + return +} + +// WriteTime writes a time.Time object to the wire. +// +// Time is encoded as Unix time, which means that +// location (time zone) data is removed from the object. +// The encoded object itself is 12 bytes: 8 bytes for +// a big-endian 64-bit integer denoting seconds +// elapsed since "zero" Unix time, followed by 4 bytes +// for a big-endian 32-bit signed integer denoting +// the nanosecond offset of the time. This encoding +// is intended to ease portability across languages. +// (Note that this is *not* the standard time.Time +// binary encoding, because its implementation relies +// heavily on the internal representation used by the +// time package.) +func (mw *Writer) WriteTime(t time.Time) error { + t = t.UTC() + o, err := mw.require(15) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 12 + mw.buf[o+2] = TimeExtension + putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) + return nil +} + +// WriteTimeExt will write t using the official msgpack extension spec. +// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type +func (mw *Writer) WriteTimeExt(t time.Time) error { + // Time rounded towards zero. + secPrec := t.Truncate(time.Second) + remain := t.Sub(secPrec).Nanoseconds() + asSecs := secPrec.Unix() + switch { + case remain == 0 && asSecs > 0 && asSecs <= math.MaxUint32: + // 4 bytes + o, err := mw.require(6) + if err != nil { + return err + } + mw.buf[o] = mfixext4 + mw.buf[o+1] = byte(msgTimeExtension) + binary.BigEndian.PutUint32(mw.buf[o+2:], uint32(asSecs)) + return nil + case asSecs < 0 || asSecs >= (1<<34): + // 12 bytes + o, err := mw.require(12 + 3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 12 + mw.buf[o+2] = byte(msgTimeExtension) + binary.BigEndian.PutUint32(mw.buf[o+3:], uint32(remain)) + binary.BigEndian.PutUint64(mw.buf[o+3+4:], uint64(asSecs)) + default: + // 8 bytes + o, err := mw.require(10) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = byte(msgTimeExtension) + binary.BigEndian.PutUint64(mw.buf[o+2:], uint64(asSecs)|(uint64(remain)<<34)) + } + return nil +} + +// WriteJSONNumber writes the json.Number to the stream as either integer or float. +func (mw *Writer) WriteJSONNumber(n json.Number) error { + if n == "" { + // The zero value outputs the 0 integer. + return mw.push(0) + } + ii, err := n.Int64() + if err == nil { + return mw.WriteInt64(ii) + } + ff, err := n.Float64() + if err == nil { + return mw.WriteFloat(ff) + } + return err +} + +// WriteIntf writes the concrete type of 'v'. +// WriteIntf will error if 'v' is not one of the following: +// - A bool, float, string, []byte, int, uint, or complex +// - A map of supported types (with string keys) +// - An array or slice of supported types +// - A pointer to a supported type +// - A type that satisfies the msgp.Encodable interface +// - A type that satisfies the msgp.Extension interface +func (mw *Writer) WriteIntf(v interface{}) error { + if v == nil { + return mw.WriteNil() + } + switch v := v.(type) { + + // preferred interfaces + + case Encodable: + return v.EncodeMsg(mw) + case Extension: + return mw.WriteExtension(v) + + // concrete types + + case bool: + return mw.WriteBool(v) + case float32: + return mw.WriteFloat32(v) + case float64: + return mw.WriteFloat64(v) + case complex64: + return mw.WriteComplex64(v) + case complex128: + return mw.WriteComplex128(v) + case uint8: + return mw.WriteUint8(v) + case uint16: + return mw.WriteUint16(v) + case uint32: + return mw.WriteUint32(v) + case uint64: + return mw.WriteUint64(v) + case uint: + return mw.WriteUint(v) + case int8: + return mw.WriteInt8(v) + case int16: + return mw.WriteInt16(v) + case int32: + return mw.WriteInt32(v) + case int64: + return mw.WriteInt64(v) + case int: + return mw.WriteInt(v) + case string: + return mw.WriteString(v) + case []byte: + return mw.WriteBytes(v) + case map[string]string: + return mw.WriteMapStrStr(v) + case map[string]interface{}: + return mw.WriteMapStrIntf(v) + case time.Time: + return mw.WriteTime(v) + case time.Duration: + return mw.WriteDuration(v) + case json.Number: + return mw.WriteJSONNumber(v) + } + + val := reflect.ValueOf(v) + if !isSupported(val.Kind()) || !val.IsValid() { + return errors.New("msgp: type " + val.String() + " not supported") + } + + switch val.Kind() { + case reflect.Ptr: + if val.IsNil() { + return mw.WriteNil() + } + return mw.WriteIntf(val.Elem().Interface()) + case reflect.Slice: + return mw.writeSlice(val) + case reflect.Map: + return mw.writeMap(val) + } + return &ErrUnsupportedType{T: val.Type()} +} + +func (mw *Writer) writeMap(v reflect.Value) (err error) { + if v.Type().Key().Kind() != reflect.String { + return errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + err = mw.WriteMapHeader(uint32(len(ks))) + if err != nil { + return + } + for _, key := range ks { + val := v.MapIndex(key) + err = mw.WriteString(key.String()) + if err != nil { + return + } + err = mw.WriteIntf(val.Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeSlice(v reflect.Value) (err error) { + // is []byte + if v.Type().ConvertibleTo(btsType) { + return mw.WriteBytes(v.Bytes()) + } + + sz := uint32(v.Len()) + err = mw.WriteArrayHeader(sz) + if err != nil { + return + } + for i := uint32(0); i < sz; i++ { + err = mw.WriteIntf(v.Index(int(i)).Interface()) + if err != nil { + return + } + } + return +} + +// is the reflect.Kind encodable? +func isSupported(k reflect.Kind) bool { + switch k { + case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer: + return false + default: + return true + } +} + +// GuessSize guesses the size of the underlying +// value of 'i'. If the underlying value is not +// a simple builtin (or []byte), GuessSize defaults +// to 512. +func GuessSize(i interface{}) int { + if i == nil { + return NilSize + } + + switch i := i.(type) { + case Sizer: + return i.Msgsize() + case Extension: + return ExtensionPrefixSize + i.Len() + case float64: + return Float64Size + case float32: + return Float32Size + case uint8, uint16, uint32, uint64, uint: + return UintSize + case int8, int16, int32, int64, int: + return IntSize + case []byte: + return BytesPrefixSize + len(i) + case string: + return StringPrefixSize + len(i) + case complex64: + return Complex64Size + case complex128: + return Complex128Size + case bool: + return BoolSize + case map[string]interface{}: + s := MapHeaderSize + for key, val := range i { + s += StringPrefixSize + len(key) + GuessSize(val) + } + return s + case map[string]string: + s := MapHeaderSize + for key, val := range i { + s += 2*StringPrefixSize + len(key) + len(val) + } + return s + default: + return 512 + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go new file mode 100644 index 000000000000..704501746a4c --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go @@ -0,0 +1,520 @@ +package msgp + +import ( + "encoding/binary" + "encoding/json" + "errors" + "math" + "reflect" + "time" +) + +// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b) +func ensure(b []byte, sz int) ([]byte, int) { + l := len(b) + c := cap(b) + if c-l < sz { + o := make([]byte, (2*c)+sz) // exponential growth + n := copy(o, b) + return o[:n+sz], n + } + return b[:l+sz], l +} + +// AppendMapHeader appends a map header with the +// given size to the slice +func AppendMapHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixmap(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], mmap16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], mmap32, sz) + return o + } +} + +// AppendArrayHeader appends an array header with +// the given size to the slice +func AppendArrayHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixarray(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], marray16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], marray32, sz) + return o + } +} + +// AppendNil appends a 'nil' byte to the slice +func AppendNil(b []byte) []byte { return append(b, mnil) } + +// AppendFloat appends a float to the slice as either float64 +// or float32 when it represents the exact same value +func AppendFloat(b []byte, f float64) []byte { + f32 := float32(f) + if float64(f32) == f { + return AppendFloat32(b, f32) + } + return AppendFloat64(b, f) +} + +// AppendFloat64 appends a float64 to the slice +func AppendFloat64(b []byte, f float64) []byte { + o, n := ensure(b, Float64Size) + prefixu64(o[n:], mfloat64, math.Float64bits(f)) + return o +} + +// AppendFloat32 appends a float32 to the slice +func AppendFloat32(b []byte, f float32) []byte { + o, n := ensure(b, Float32Size) + prefixu32(o[n:], mfloat32, math.Float32bits(f)) + return o +} + +// AppendDuration appends a time.Duration to the slice +func AppendDuration(b []byte, d time.Duration) []byte { + return AppendInt64(b, int64(d)) +} + +// AppendInt64 appends an int64 to the slice +func AppendInt64(b []byte, i int64) []byte { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return append(b, wfixint(uint8(i))) + case i <= math.MaxInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i <= math.MaxInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } + } + switch { + case i >= -32: + return append(b, wnfixint(int8(i))) + case i >= math.MinInt8: + o, n := ensure(b, 2) + putMint8(o[n:], int8(i)) + return o + case i >= math.MinInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i >= math.MinInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } +} + +// AppendInt appends an int to the slice +func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt8 appends an int8 to the slice +func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt16 appends an int16 to the slice +func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt32 appends an int32 to the slice +func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) } + +// AppendUint64 appends a uint64 to the slice +func AppendUint64(b []byte, u uint64) []byte { + switch { + case u <= (1<<7)-1: + return append(b, wfixint(uint8(u))) + + case u <= math.MaxUint8: + o, n := ensure(b, 2) + putMuint8(o[n:], uint8(u)) + return o + + case u <= math.MaxUint16: + o, n := ensure(b, 3) + putMuint16(o[n:], uint16(u)) + return o + + case u <= math.MaxUint32: + o, n := ensure(b, 5) + putMuint32(o[n:], uint32(u)) + return o + + default: + o, n := ensure(b, 9) + putMuint64(o[n:], u) + return o + + } +} + +// AppendUint appends a uint to the slice +func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint8 appends a uint8 to the slice +func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) } + +// AppendByte is analogous to AppendUint8 +func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) } + +// AppendUint16 appends a uint16 to the slice +func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint32 appends a uint32 to the slice +func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) } + +// AppendBytes appends bytes to the slice as MessagePack 'bin' data +func AppendBytes(b []byte, bts []byte) []byte { + sz := len(bts) + var o []byte + var n int + switch { + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mbin8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mbin16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mbin32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], bts)] +} + +// AppendBytesHeader appends an 'bin' header with +// the given size to the slice. +func AppendBytesHeader(b []byte, sz uint32) []byte { + var o []byte + var n int + switch { + case sz <= math.MaxUint8: + o, n = ensure(b, 2) + prefixu8(o[n:], mbin8, uint8(sz)) + return o + case sz <= math.MaxUint16: + o, n = ensure(b, 3) + prefixu16(o[n:], mbin16, uint16(sz)) + return o + } + o, n = ensure(b, 5) + prefixu32(o[n:], mbin32, sz) + return o +} + +// AppendBool appends a bool to the slice +func AppendBool(b []byte, t bool) []byte { + if t { + return append(b, mtrue) + } + return append(b, mfalse) +} + +// AppendString appends a string as a MessagePack 'str' to the slice +func AppendString(b []byte, s string) []byte { + sz := len(s) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], s)] +} + +// AppendStringFromBytes appends a []byte +// as a MessagePack 'str' to the slice 'b.' +func AppendStringFromBytes(b []byte, str []byte) []byte { + sz := len(str) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], str)] +} + +// AppendComplex64 appends a complex64 to the slice as a MessagePack extension +func AppendComplex64(b []byte, c complex64) []byte { + o, n := ensure(b, Complex64Size) + o[n] = mfixext8 + o[n+1] = Complex64Extension + big.PutUint32(o[n+2:], math.Float32bits(real(c))) + big.PutUint32(o[n+6:], math.Float32bits(imag(c))) + return o +} + +// AppendComplex128 appends a complex128 to the slice as a MessagePack extension +func AppendComplex128(b []byte, c complex128) []byte { + o, n := ensure(b, Complex128Size) + o[n] = mfixext16 + o[n+1] = Complex128Extension + big.PutUint64(o[n+2:], math.Float64bits(real(c))) + big.PutUint64(o[n+10:], math.Float64bits(imag(c))) + return o +} + +// AppendTime appends a time.Time to the slice as a MessagePack extension +func AppendTime(b []byte, t time.Time) []byte { + o, n := ensure(b, TimeSize) + t = t.UTC() + o[n] = mext8 + o[n+1] = 12 + o[n+2] = TimeExtension + putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond())) + return o +} + +// AppendTimeExt will write t using the official msgpack extension spec. +// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type +func AppendTimeExt(b []byte, t time.Time) []byte { + // Time rounded towards zero. + secPrec := t.Truncate(time.Second) + remain := t.Sub(secPrec).Nanoseconds() + asSecs := secPrec.Unix() + switch { + case remain == 0 && asSecs > 0 && asSecs <= math.MaxUint32: + // 4 bytes + o, n := ensure(b, 2+4) + o[n+0] = mfixext4 + o[n+1] = byte(msgTimeExtension) + binary.BigEndian.PutUint32(o[n+2:], uint32(asSecs)) + return o + case asSecs < 0 || asSecs >= (1<<34): + // 12 bytes + o, n := ensure(b, 3+12) + o[n+0] = mext8 + o[n+1] = 12 + o[n+2] = byte(msgTimeExtension) + binary.BigEndian.PutUint32(o[n+3:], uint32(remain)) + binary.BigEndian.PutUint64(o[n+3+4:], uint64(asSecs)) + return o + default: + // 8 bytes + o, n := ensure(b, 2+8) + o[n+0] = mfixext8 + o[n+1] = byte(msgTimeExtension) + binary.BigEndian.PutUint64(o[n+2:], uint64(asSecs)|(uint64(remain)<<34)) + return o + } +} + +// AppendMapStrStr appends a map[string]string to the slice +// as a MessagePack map with 'str'-type keys and values +func AppendMapStrStr(b []byte, m map[string]string) []byte { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + for key, val := range m { + b = AppendString(b, key) + b = AppendString(b, val) + } + return b +} + +// AppendMapStrIntf appends a map[string]interface{} to the slice +// as a MessagePack map with 'str'-type keys. +func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + var err error + for key, val := range m { + b = AppendString(b, key) + b, err = AppendIntf(b, val) + if err != nil { + return b, err + } + } + return b, nil +} + +// AppendIntf appends the concrete type of 'i' to the +// provided []byte. 'i' must be one of the following: +// - 'nil' +// - A bool, float, string, []byte, int, uint, or complex +// - A map[string]T where T is another supported type +// - A []T, where T is another supported type +// - A *T, where T is another supported type +// - A type that satisfies the msgp.Marshaler interface +// - A type that satisfies the msgp.Extension interface +func AppendIntf(b []byte, i interface{}) ([]byte, error) { + if i == nil { + return AppendNil(b), nil + } + + // all the concrete types + // for which we have methods + switch i := i.(type) { + case Marshaler: + return i.MarshalMsg(b) + case Extension: + return AppendExtension(b, i) + case bool: + return AppendBool(b, i), nil + case float32: + return AppendFloat32(b, i), nil + case float64: + return AppendFloat64(b, i), nil + case complex64: + return AppendComplex64(b, i), nil + case complex128: + return AppendComplex128(b, i), nil + case string: + return AppendString(b, i), nil + case []byte: + return AppendBytes(b, i), nil + case int8: + return AppendInt8(b, i), nil + case int16: + return AppendInt16(b, i), nil + case int32: + return AppendInt32(b, i), nil + case int64: + return AppendInt64(b, i), nil + case int: + return AppendInt64(b, int64(i)), nil + case uint: + return AppendUint64(b, uint64(i)), nil + case uint8: + return AppendUint8(b, i), nil + case uint16: + return AppendUint16(b, i), nil + case uint32: + return AppendUint32(b, i), nil + case uint64: + return AppendUint64(b, i), nil + case time.Time: + return AppendTime(b, i), nil + case time.Duration: + return AppendDuration(b, i), nil + case map[string]interface{}: + return AppendMapStrIntf(b, i) + case map[string]string: + return AppendMapStrStr(b, i), nil + case json.Number: + return AppendJSONNumber(b, i) + case []interface{}: + b = AppendArrayHeader(b, uint32(len(i))) + var err error + for _, k := range i { + b, err = AppendIntf(b, k) + if err != nil { + return b, err + } + } + return b, nil + } + + var err error + v := reflect.ValueOf(i) + switch v.Kind() { + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return b, errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + b = AppendMapHeader(b, uint32(len(ks))) + for _, key := range ks { + val := v.MapIndex(key) + b = AppendString(b, key.String()) + b, err = AppendIntf(b, val.Interface()) + if err != nil { + return nil, err + } + } + return b, nil + case reflect.Array, reflect.Slice: + l := v.Len() + b = AppendArrayHeader(b, uint32(l)) + for i := 0; i < l; i++ { + b, err = AppendIntf(b, v.Index(i).Interface()) + if err != nil { + return b, err + } + } + return b, nil + case reflect.Ptr: + if v.IsNil() { + return AppendNil(b), err + } + b, err = AppendIntf(b, v.Elem().Interface()) + return b, err + default: + return b, &ErrUnsupportedType{T: v.Type()} + } +} + +// AppendJSONNumber appends a json.Number to the slice. +// An error will be returned if the json.Number returns error as both integer and float. +func AppendJSONNumber(b []byte, n json.Number) ([]byte, error) { + if n == "" { + // The zero value outputs the 0 integer. + return append(b, 0), nil + } + ii, err := n.Int64() + if err == nil { + return AppendInt64(b, ii), nil + } + ff, err := n.Float64() + if err == nil { + return AppendFloat(b, ff), nil + } + return b, err +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go index 2950fdb42eea..e854d7e84e86 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst, src []byte) error { +func unmarshalJSON(dst []byte, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go index 5bb3b16c704c..29e629d6674d 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error { // strings or integers. type protoUint64 uint64 -// Uint64 returns the protoUint64 as a uint64. +// Int64 returns the protoUint64 as a uint64. func (i *protoUint64) Uint64() uint64 { return uint64(*i) } // UnmarshalJSON decodes both strings and integers. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go index 67f80b6aa078..a13a6b733da8 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "math" "time" ) @@ -152,8 +151,8 @@ func (s Span) MarshalJSON() ([]byte, error) { }{ Alias: Alias(s), ParentSpanID: parentSpanId, - StartTime: uint64(startT), // nolint:gosec // >0 checked above. - EndTime: uint64(endT), // nolint:gosec // >0 checked above. + StartTime: uint64(startT), + EndTime: uint64(endT), }) } @@ -202,13 +201,11 @@ func (s *Span) UnmarshalJSON(data []byte) error { case "startTimeUnixNano", "start_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. - s.StartTime = time.Unix(0, v) + s.StartTime = time.Unix(0, int64(val.Uint64())) case "endTimeUnixNano", "end_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. - s.EndTime = time.Unix(0, v) + s.EndTime = time.Unix(0, int64(val.Uint64())) case "attributes": err = decoder.Decode(&s.Attrs) case "droppedAttributesCount", "dropped_attributes_count": @@ -251,20 +248,13 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( - // SpanFlagsTraceFlagsMask is a mask for trace-flags. - // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is - // remote. + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -273,30 +263,26 @@ const ( type SpanKind int32 const ( - // SpanKindInternal indicates that the span represents an internal - // operation within an application, as opposed to an operation happening at - // the boundaries. + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. SpanKindInternal SpanKind = 1 - // SpanKindServer indicates that the span covers server-side handling of an - // RPC or other remote network request. + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. SpanKindServer SpanKind = 2 - // SpanKindClient indicates that the span describes a request to some - // remote service. + // Indicates that the span describes a request to some remote service. SpanKindClient SpanKind = 3 - // SpanKindProducer indicates that the span describes a producer sending a - // message to a broker. Unlike SpanKindClient and SpanKindServer, there is - // often no direct critical path latency relationship between producer and - // consumer spans. A SpanKindProducer span ends when the message was - // accepted by the broker while the logical processing of the message might - // span a much longer time. + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. SpanKindProducer SpanKind = 4 - // SpanKindConsumer indicates that the span describes a consumer receiving - // a message from a broker. Like SpanKindProducer, there is often no direct - // critical path latency relationship between producer and consumer spans. + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied +// Event is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. @@ -326,7 +312,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) { Time uint64 `json:"timeUnixNano,omitempty"` }{ Alias: Alias(e), - Time: uint64(t), //nolint:gosec // >0 checked above + Time: uint64(t), }) } @@ -361,8 +347,7 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { case "timeUnixNano", "time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. - se.Time = time.Unix(0, v) + se.Time = time.Unix(0, int64(val.Uint64())) case "name": err = decoder.Decode(&se.Name) case "attributes": @@ -380,11 +365,10 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// SpanLink is a reference from the current span to another span in the same -// trace or in a different trace. For example, this can be used in batching -// operations, where a single batch handler processes multiple requests from -// different traces or when the handler receives a request from a different -// project. +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go index a2802764f811..1217776ead1e 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -3,19 +3,17 @@ package telemetry -// StatusCode is the status of a Span. -// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // StatusCodeUnset is the default status. + // The default status. StatusCodeUnset StatusCode = 0 - // StatusCodeOK is used when the Span has been validated by an Application - // developer or Operator to have completed successfully. + // The Span has been validated by an Application developer or Operator to + // have completed successfully. StatusCodeOK StatusCode = 1 - // StatusCodeError is used when the Span contains an error. + // The Span contains an error. StatusCodeError StatusCode = 2 ) diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go index 44197b80849c..69a348f0f064 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// ResourceSpans is a collection of ScopeSpans from a Resource. +// A collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// ScopeSpans is a collection of Spans produced by an InstrumentationScope. +// A collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. // Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go index 022768bb5018..0dd01b063a34 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + package telemetry import ( @@ -21,7 +23,7 @@ import ( // A zero value is valid and represents an empty value. type Value struct { // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint:unused // This is indeed used. + noCmp [0]func() //nolint: unused // This is indeed used. // num holds the value for Int64, Float64, and Bool. It holds the length // for String, Bytes, Slice, Map. @@ -90,7 +92,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. + return Value{num: uint64(v), any: ValueKindInt64} } // Float64Value returns a [Value] for a float64. @@ -162,7 +164,7 @@ func (v Value) AsInt64() int64 { // this will return garbage. func (v Value) asInt64() int64 { // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) //nolint:gosec // Bounded. + return int64(v.num) // nolint: gosec } // AsBool returns the value held by v as a bool. @@ -307,13 +309,13 @@ func (v Value) String() string { return v.asString() case ValueKindInt64: // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. + return strconv.FormatInt(int64(v.num), 10) // nolint: gosec case ValueKindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return string(v.asBytes()) + return fmt.Sprint(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: @@ -341,7 +343,7 @@ func (v *Value) MarshalJSON() ([]byte, error) { case ValueKindInt64: return json.Marshal(struct { Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. + }{strconv.FormatInt(int64(v.num), 10)}) case ValueKindFloat64: return json.Marshal(struct { Value float64 `json:"doubleValue"` diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go index 815d271ffb26..6ebea12a9e9f 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -6,7 +6,6 @@ package sdk import ( "encoding/json" "fmt" - "math" "reflect" "runtime" "strings" @@ -17,7 +16,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" @@ -86,12 +85,7 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { limit := maxSpan.Attrs if limit == 0 { // No attributes allowed. - n := int64(len(attrs)) - if n > 0 { - s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. - min(n, math.MaxUint32), - ) - } + s.span.DroppedAttrs += uint32(len(attrs)) return } @@ -127,13 +121,8 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // number of dropped attributes is also returned. func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { - n := len(attrs) if limit == 0 { - var out uint32 - if n > 0 { - out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. - } - return nil, out + return nil, uint32(len(attrs)) } if limit < 0 { @@ -141,12 +130,8 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u return convAttrs(attrs), 0 } - if n < 0 { - n = 0 - } - - limit = min(n, limit) - return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) } func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go index e09acf022fa1..cbcfabde3b1a 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -5,7 +5,6 @@ package sdk import ( "context" - "math" "time" "go.opentelemetry.io/otel/trace" @@ -22,20 +21,15 @@ type tracer struct { var _ trace.Tracer = tracer{} -func (t tracer) Start( - ctx context.Context, - name string, - opts ...trace.SpanStartOption, -) (context.Context, trace.Span) { - var psc, sc trace.SpanContext +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext sampled := true span := new(span) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &sc) + t.start(ctx, span, &psc, &sampled, &span.spanContext) span.sampled.Store(sampled) - span.spanContext = sc ctx = trace.ContextWithSpan(ctx, span) @@ -64,13 +58,7 @@ func (t *tracer) start( // start is used for testing. var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} -var intToUint32Bound = min(math.MaxInt, math.MaxUint32) - -func (t tracer) traces( - name string, - cfg trace.SpanConfig, - sc, psc trace.SpanContext, -) (*telemetry.Traces, *telemetry.Span) { +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { span := &telemetry.Span{ TraceID: telemetry.TraceID(sc.TraceID()), SpanID: telemetry.SpanID(sc.SpanID()), @@ -85,16 +73,11 @@ func (t tracer) traces( links := cfg.Links() if limit := maxSpan.Links; limit == 0 { - n := len(links) - if n > 0 { - bounded := max(min(n, intToUint32Bound), 0) - span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. - } + span.DroppedLinks = uint32(len(links)) } else { if limit > 0 { n := max(len(links)-limit, 0) - bounded := min(n, intToUint32Bound) - span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. + span.DroppedLinks = uint32(n) links = links[n:] } span.Links = convLinks(links) diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children new file mode 100644 index 000000000000..986a246a6c05 Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/children differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes new file mode 100644 index 000000000000..38b8999600c8 Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/nodes differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text new file mode 100644 index 000000000000..b151d97de276 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/data/text @@ -0,0 +1 @@ +bolzano-altoadigevje-og-hornnes3-website-us-west-2bomlocustomer-ocienciabonavstackarasjoketokuyamashikokuchuobondigitaloceanspacesakurastoragextraspace-to-rentalstomakomaibarabonesakuratanishikatakazakindustriesteinkjerepbodynaliasnesoddeno-staginglobodoes-itcouldbeworfarsundiskussionsbereichateblobanazawarszawashtenawsapprunnerdpoliticaarparliamenthickarasuyamasoybookonlineboomladeskierniewiceboschristmasakilovecollegefantasyleaguedagestangebostik-serveronagasukeyword-oncillahppictetcieszynishikatsuragit-repostre-totendofinternet-dnsakurawebredirectmeiwamizawabostonakijinsekikogentlentapisa-geekaratsuginamikatagamimozaporizhzhegurinfinitigooglecode-builder-stg-buildereporthruhereclaimsakyotanabellunord-odalvdalcest-le-patron-k3salangenishikawazukamishihorobotdashgabadaddjabbotthuathienhuebouncemerckmsdscloudisrechtrafficplexus-4boutiquebecologialaichaugianglogowegroweibolognagasakikugawaltervistaikillondonetskarelianceboutireserve-onlineboyfriendoftheinternetflixn--11b4c3ditchyouriparmabozen-sudtirolondrinaplesknsalatrobeneventoeidsvollorenskogloomy-gatewaybozen-suedtirolovableprojectjeldsundivtasvuodnakamai-stagingloppennebplaceditorxn--12c1fe0bradescotaruinternationalovepoparochernihivgubamblebtimnetzjaworznotebook-fips3-fips-us-gov-east-1brandivttasvuotnakamuratajirintlon-2brasiliadboxoslodingenishimerabravendbarcelonagawakuyabukikiraragusabaerobatickets3-fips-us-gov-west-1bresciaogashimadachicappabianiceobridgestonebrindisiciliabroadwaybroke-itvedestrandixn--12cfi8ixb8lovesickarlsoybrokerevistathellebrothermesserlidlplfinancialpusercontentjmaxxxn--12co0c3b4evalleaostargets-itjomeldalucaniabrumunddaluccampobassociatesalon-1brusselsaloonishinomiyashironobryanskiervadsoccerhcloudyclusterbrynebweirbzhitomirumaintenanceclothingdustdatadetectoyouracngovtoystre-slidrettozawacnpyatigorskjakamaiedge-stagingreatercnsapporocntozsdeliverycodebergrayjayleaguesardegnarutoshimatta-varjjatranatalcodespotenzakopanecoffeedbackanagawatsonrendercommunity-prochowicecomockashiharacompanyantaishinomakimobetsulifestylefrakkestadurumisakindlegnicahcesuolohmusashimurayamaizuruhr-uni-bochuminamiechizenisshingucciminamifuranocomparemarkerryhotelsardiniacomputercomsecretrosnubarclays3-me-south-1condoshiibabymilk3conferenceconstructioniyodogawaconsuladobeio-static-accesscamdvrcampaniaconsultantranbyconsultingretakamoriokakudamatsuecontactivetrail-central-1contagematsubaracontractorstabacgiangiangryconvexecute-apictureshinordkappaviacookingrimstadynathomebuiltwithdarklangevagrarchitectestingripeeweeklylotterycooperativano-frankivskjervoyagecoprofesionalchikugodaddyn-o-saureadymadethis-a-anarchistjordalshalsenl-ams-1corsicafederationfabricable-modemoneycosenzamamidorivnecosidnsdojoburgriwataraindroppdalcouchpotatofriesarlcouncilcouponstackitagawacozoracpservernamegataitogodoesntexisteingeekashiwaracqcxn--1lqs71dyndns-at-homedepotrani-andria-barletta-trani-andriacrankyotobetsulubin-dsldyndns-at-workisboringsakershusrcfdyndns-blogsiteleaf-south-1crdyndns-freeboxosarpsborgroks-theatrentin-sud-tirolcreditcardyndns-homednsarufutsunomiyawakasaikaitakokonoecreditunioncremonasharis-a-bulls-fancrewp2cricketnedalcrimeast-kazakhstanangercrispawnextdirectraniandriabarlettatraniandriacrminamiiseharacrotonecrownipfizercrsasayamacruisesaseboknowsitallcryptonomichiharacuisinellamdongnairflowersassaris-a-candidatecuneocuritibackdropalermobarag-cloud-charitydalp1cutegirlfriendyndns-ipgwangjulvikashiwazakizunokuniminamiashigarafedoraprojectransiphdfcbankasserverrankoshigayakagefeirafembetsukubankasukabeautypedreamhosterscrapper-sitefermodalenferraraferraris-a-celticsfanferreroticallynxn--2scrj9cargoboavistanbulsan-sudtiroluhanskarmoyfetsundyndns-remotewdhlx3fgroundhandlingroznyfhvalerfilegear-sg-1filminamiminowafinalfinancefinnoyfirebaseapphilipscrappingrphonefosscryptedyndns-serverdalfirenetgamerscrysecuritytacticscwestus2firenzeaburfirestonefirmdaleilaocairportranslatedyndns-webhareidsbergroks-thisayamanobearalvahkikonaikawachinaganoharamcoachampionshiphoplixn--1qqw23afishingokasellfyresdalfitjarfitnessettsurugashimamurogawafjalerfkasumigaurayasudaflesbergrueflickragerotikagoshimandalflierneflirflogintohmangoldpoint2thisamitsukefloppymntransportefloraclegovcloudappservehttpbincheonflorencefloripadualstackasuyakumoduminamioguni5floristanohatakaharunservehumourfloromskoguidefinimalopolskanittedalfltransurlflutterflowhitesnowflakeflyfncarrdiyfndyndns-wikinkobayashimofusadojin-the-bandairlinemurorangecloudplatformshakotanpachihayaakasakawaharacingrondarfoolfor-ourfor-somedusajserveircasacampinagrandebulsan-suedtirolukowesleyfor-theaterfordebianforexrotheworkpccwhminamisanrikubetsupersaleksvikaszubytemarketingvollforgotdnserveminecraftrapanikkoelnforli-cesena-forlicesenaforlikescandypopensocialforsalesforceforsandasuoloisirservemp3fortalfosneservep2photographysiofotravelersinsurancefoxn--30rr7yfozfr-1fr-par-1fr-par-2franalytics-gatewayfredrikstadyndns-worksauheradyndns-mailfreedesktopazimuthaibinhphuocprapidyndns1freemyiphostyhostinguitarservepicservequakefreesitefreetlservesarcasmilefreightravinhlonganfrenchkisshikirovogradoyfreseniuservicebuskerudynnsaveincloudyndns-office-on-the-webflowtest-iservebloginlinefriuli-v-giuliarafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfrogansevastopolitiendafrognfrolandynservebbsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriafrom-akamaiorigin-stagingujaratmetacentruminamitanefrom-alfrom-arfrom-azureedgecompute-1from-caltanissettainaircraftraeumtgeradealstahaugesunderfrom-cockpitrdynuniversitysvardofrom-ctrentin-sudtirolfrom-dcasertaipeigersundnparsaltdaluroyfrom-decafjsevenassieradzfrom-flatangerfrom-gap-southeast-3from-higashiagatsumagoianiafrom-iafrom-idynv6from-ilfrom-in-vpncashorokanaiefrom-ksewhoswholidayfrom-kyfrom-langsonyatomigrationfrom-mangyshlakamaized-stagingujohanamakinoharafrom-mdynvpnplusavonarviikamisatokonamerikawauefrom-meetrentin-sued-tirolfrom-mihamadanangoguchilloutsystemscloudscalebookinghosteurodirfrom-mnfrom-modellingulenfrom-msexyfrom-mtnfrom-ncasinordeste-idclkarpaczest-a-la-maisondre-landray-dnsaludrayddns-ipartintuitjxn--1ck2e1barclaycards3-globalatinabelementorayomitanobservableusercontentateyamauth-fipstmninomiyakonojosoyrovnoticeableitungsenirasakibxos3-ca-central-180reggio-emilia-romagnaroyolasitebinordlandeus-canvasitebizenakanojogaszkolamericanfamilyds3-ap-south-12hparallelimodxboxeroxjavald-aostaticsxmitakeharaugustow-corp-staticblitzgorzeleccocotteatonamifunebetsuikirkenes3-ap-northeast-2ixn--0trq7p7nninjambylive-oninohekinanporovigonnakasatsunaibigawaukraanghkembuchikumagayagawakkanaibetsubame-central-123websitebuildersvp4from-ndyroyrvikingrongrossetouchijiwadedyn-berlincolnfrom-nefrom-nhlfanfrom-njsheezyfrom-nminamiuonumatsunofrom-nvalled-aostargithubusercontentrentin-suedtirolfrom-nysagamiharafrom-ohdancefrom-okegawafrom-orfrom-palmasfjordenfrom-pratohnoshookuwanakanotoddenfrom-ris-a-chefashionstorebaseljordyndns-picsbssaudafrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--32vp30hachinoheavyfrom-utsiracusagemakerfrom-val-daostavalleyfrom-vtrentino-a-adigefrom-wafrom-wiardwebspaceconfigunmarnardalfrom-wvalledaostarnobrzeguovdageaidnunjargausdalfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingushikamifuranorth-kazakhstanfujiiderafujikawaguchikonefujiminokamoenairtelebitbucketrzynh-servebeero-stageiseiroutingthecloudfujinomiyadappnodearthainguyenfujiokazakiryuohkurafujisatoshoeshellfujisawafujishiroishidakabiratoridediboxafujitsuruokakamigaharafujiyoshidatsunanjoetsumidaklakasamatsudogadobeioruntimedicinakaiwanairforcentralus-1fukayabeagleboardfukuchiyamadattorelayfukudomigawafukuis-a-conservativefsnoasakakinokiafukumitsubishigakisarazure-apigeefukuokakegawafukuroishikariwakunigamiharuovatlassian-dev-builderfukusakishiwadattoweberlevagangaviikanonjis-a-cpanelfukuyamagatakahashimamakisofukushimaniwamannordre-landfunabashiriuchinadavvenjargamvikatowicefunagatakahatakaishimokawafunahashikamiamakusatsumasendaisenergyeonggiizefundfunkfeuerfunnelshimonitayanagitapphutholdingsmall-websozais-a-cubicle-slaveroykenfuoiskujukuriyamaoris-a-democratrentino-aadigefuosskodjeezfurubirafurudonordreisa-hockeynutwentertainmentrentino-alto-adigefurukawaiishoppingxn--3bst00minamiyamashirokawanabeepsondriobranconagarahkkeravjunusualpersonfusoctrangyeongnamdinhs-heilbronnoysundfussaikisosakitahatakamatsukawafutabayamaguchinomihachimanagementrentino-altoadigefutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinairtrafficmanagerfuturecmshimonosekikawafuturehosting-clusterfuturemailingzfvghakuis-a-doctoruncontainershimotsukehakusandnessjoenhaldenhalfmoonscaleforcehalsaitamatsukuris-a-financialadvisor-aurdalham-radio-ophuyenhamburghammarfeastasiahamurakamigoris-a-fullstackaufentigerhanamigawahanawahandahandcraftedugit-pages-researchedmarketplacehangglidinghangoutrentino-s-tirolhannannestadhannoshiroomghanoiphxn--3ds443ghanyuzenhappoumuginowaniihamatamakawajimap-southeast-4hasamazoncognitoigawahasaminami-alpshimotsumahashbanghasudahasura-appigboatshinichinanhasvikautokeinotionhatenablogspotrentino-stirolhatenadiaryhatinhachiojiyachiyodazaifudaigojomedio-campidano-mediocampidanomediohatogayachtshinjournalistorfjordhatoyamazakitakatakanezawahatsukaichikawamisatohokkaidontexistmein-iservschulegalleryhattfjelldalhayashimamotobusells-for-lesshinjukuleuvenicehazuminobushibuyahabacninhbinhdinhktrentino-sud-tirolhelpgfoggiahelsinkitakyushunantankazohemneshinkamigotoyokawahemsedalhepforgeblockshinshinotsupplyhetemlbfanheyflowienhigashichichibuzzhigashihiroshimanehigashiizumozakitamihokksundhigashikagawahigashikagurasoedahigashikawakitaaikitamotosumy-routerhigashikurumegurownproviderhigashimatsushimarriottrentino-sudtirolhigashimatsuyamakitaakitadaitomanaustdalhigashimurayamamotorcycleshinshirohigashinarusells-for-uzhhorodhigashinehigashiomitamamurausukitanakagusukumodshintokushimahigashiosakasayamanakakogawahigashishirakawamatakaokalmykiahigashisumiyoshikawaminamiaikitashiobarahigashitsunospamproxyhigashiurawa-mazowszexposeducatorprojectrentino-sued-tirolhigashiyamatokoriyamanashijonawatehigashiyodogawahigashiyoshinogaris-a-geekazunotogawahippythonanywherealminanohiraizumisatokaizukaluganskddiamondshintomikasaharahirakatashinagawahiranais-a-goodyearhirarahiratsukagawahirayahikobeatshinyoshitomiokamisunagawahitachiomiyakehitachiotaketakarazukamaishimodatehitradinghjartdalhjelmelandholyhomegoodshiojirishirifujiedahomeipikehomelinuxn--3e0b707ehomesecuritymacaparecidahomesecuritypcateringebungotakadaptableclerc66116-balsfjordeltaiwanumatajimidsundeportebinatsukigatakahamalvik8s3-ap-northeast-3utilities-12charstadaokagakirunocelotenkawadlugolekadena4ufcfanimsiteasypanelblagrigentobishimafeloansncf-ipfstdlibestadultatarantoyonakagyokutoyonezawapartments3-ap-northeast-123webseiteckidsmynascloudfrontierimo-siemenscaledekaascolipicenoboribetsubsc-paywhirlimitedds3-accesspoint-fips3-ap-east-123miwebaccelastx4432-b-datacenterprisesakihokuizumoarekepnord-aurdalipaynow-dns-dynamic-dnsabruzzombieidskogasawarackmazerbaijan-mayenbaidarmeniajureggio-calabriaknoluoktagajoboji234lima-citychyattorneyagawafflecellclstagehirnayorobninsk123kotisivultrobjectselinogradimo-i-ranamizuhobby-siteaches-yogano-ip-ddnsgeekgalaxyzgierzgorakrehamnfshostrowwlkpnftstorage164-balsan-suedtirolillyokozeastus2000123paginawebadorsiteshikagamiishibechambagricoharugbydgoszczecin-addrammenuorogerscbgdyniaktyubinskaunicommuneustarostwodzislawdev-myqnapcloudflarecn-northwest-123sitewebcamauction-acornikonantotalimanowarudakunexus-2038homesenseeringhomeskleppilottottoris-a-greenhomeunixn--3hcrj9catfoodraydnsalvadorhondahonjyoitakasagonohejis-a-guruzshioyaltakkolobrzegersundongthapmircloudnshome-webservercelliguriahornindalhorsells-itrentino-suedtirolhorteneiheijis-a-hard-workershirahamatonbetsupportrentinoa-adigehospitalhotelwithflightshirakomaganehotmailhoyangerhoylandetakasakitaurahrsnillfjordhungyenhurdalhurumajis-a-hunterhyllestadhyogoris-a-knightpointtokashikitchenhypernodessaitokamachippubetsubetsugaruhyugawarahyundaiwafuneis-uberleetrentinoaltoadigeis-very-badis-very-evillasalleirvikharkovallee-d-aosteis-very-goodis-very-niceis-very-sweetpepperugiais-with-thebandoomdnsiskinkyowariasahikawaisk01isk02jellybeanjenv-arubahcavuotnagahamaroygardenflfanjeonnamsosnowiecaxiaskoyabenoopssejny-1jetztrentinos-tiroljevnakerjewelryjlljls-sto1jls-sto2jls-sto365jmpioneerjnjcloud-ver-jpcatholicurus-3joyentrentinostiroljoyokaichibahccavuotnagaivuotnagaokakyotambabybluebitemasekd1jozis-a-llamashikiwakuratejpmorgangwonjpnjprshoujis-a-musiciankoseis-a-painterhostsolutionshiraokamitsuekosheroykoshimizumakis-a-patsfankoshugheshwiiheyahoooshikamagayaitakashimarshallstatebankhplaystation-cloudsitekosugekotohiradomainsurealtypo3serverkotourakouhokumakogenkounosunnydaykouyamatlabcn-north-1kouzushimatrixn--41akozagawakozakis-a-personaltrainerkozowilliamhillkppspdnsigdalkrasnikahokutokyotangopocznore-og-uvdalkrasnodarkredumbrellapykrelliankristiansandcatsiiitesilklabudhabikinokawabajddarqhachirogatakanabeardubaioiraseekatsushikabedzin-brb-hostingkristiansundkrodsheradkrokstadelvaldaostavangerkropyvnytskyis-a-photographerokuappinkfh-muensterkrymisasaguris-a-playershiftrentinoaadigekumamotoyamatsumaebashimogosenkumanowtvalleedaostekumatorinokumejimatsumotofukekumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-republicanonoichinosekigaharakunitachiaraisaijorpelandkunitomigusukukis-a-rockstarachowicekunneppubtlsimple-urlkuokgroupiwatekurgankurobeebyteappleykurogiminamiawajikis-a-socialistockholmestrandkuroisodegaurakuromatsunais-a-soxfankuronkurotakikawasakis-a-studentalkushirogawakustanais-a-teacherkassyncloudkusupabaseminekutchanelkutnokuzumakis-a-techietis-a-liberalkvafjordkvalsundkvamfamplifyappchizip6kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspectrumisawamjondalenmonza-brianzapposirdalmonza-e-della-brianzaptonsbergmonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenapolicemoriyamatsushigemoriyoshiminamibosoftwarendalenugmormonstermoroyamatsuuramortgagemoscowinbarrel-of-knowledgekey-stagingjerstadigickaracolognemrstudio-prodoyonagoyauthgearapps-1and1moseushimoichikuzenmosjoenmoskenesiskomakis-a-therapistoiamosslupskmpspbaremetalpha-myqnapcloudaccess3-sa-east-1mosviknx-serversicherungmotegirlymoviemovimientoolslzmtrainingmuikamiokameokameyamatotakadamukodairamunakatanemuosattemupixolinodeusercontentrentinosud-tirolmurmanskomatsushimasudamurotorcraftrentinosudtirolmusashinodesakatakayamatsuzakis-an-accountantshiratakahagiangmuseumisconfusedmusicanthoboleslawiecommerce-shopitsitevaksdalmutsuzawamutualmy-vigormy-wanggoupilemyactivedirectorymyaddrangedalmyamazeplaymyasustor-elvdalmycloudnasushiobaramydattolocalcertrentinosued-tirolmydbservermyddnskingmydissentrentinosuedtirolmydnsmolaquilarvikomforbargainstitutemp-dnswatches3-us-east-2mydobissmarterthanyoumydrobofageorgeorgiamydsmushcdn77-securecipescaracalculatorskenmyeffectrentinsud-tirolmyfastly-edgemyfirewalledreplittlestargardmyforumishimatsusakahoginozawaonsennanmokurennebuyshousesimplesitemyfritzmyftpaccessojampanasonichernovtsydneymyhome-servermyjinomykolaivencloud66mymailermymediapchiryukyuragifuchungbukharanzanishinoomotegoismailillehammerfeste-ipartsamegawamynetnamegawamyokohamamatsudamypepizzamypetsokananiimilanoticiassurfastly-terrariuminamiizukaminoyamaxunison-servicesaxomyphotoshibalena-devicesokndalmypiemontemypsxn--42c2d9amyrdbxn--45br5cylmysecuritycamerakermyshopblocksolardalmyshopifymyspreadshopselectrentinsudtirolmytabitordermythic-beastsolundbeckommunalforbundmytis-a-bloggermytuleap-partnersomamyvnchitachinakagawassamukawatarittogitsuldalutskartuzymywirebungoonoplurinacionalpmnpodhalepodlasiellakdnepropetrovskanlandpodzonepohlpoivronpokerpokrovskomonotteroypolkowicepoltavalle-aostavernpolyspacepomorzeszowindowsserveftplatter-appkommuneponpesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-an-actresshishikuis-a-libertarianpordenonepornporsangerporsangugeporsgrunnanpoznanpraxihuanprdprereleaseoullensakerprgmrprimetelprincipenzaprivatelinkyard-cloudletsomnarvikomorotsukaminokawanishiaizubangeprivatizehealthinsuranceprogressivegarsheiyufueliv-dnsoowinepromoliserniapropertysnesopotrentinsued-tirolprotectionprotonetrentinsuedtirolprudentialpruszkowinnersor-odalprvcyprzeworskogpunyukis-an-anarchistoloseyouripinokofuefukihabororoshisogndalpupulawypussycatanzarowiosor-varangerpvhackerpvtrentoyosatoyookaneyamazoepwchitosetogliattipsamnangerpzqotoyohashimotoyakokamimineqponiatowadaqslgbtrevisognequalifioapplatterpl-wawsappspacehostedpicardquangngais-an-artistordalquangninhthuanquangtritonoshonais-an-engineeringquickconnectroandindependent-inquest-a-la-masionquicksytesorfoldquipelementsorocabalestrandabergamochizukijobservablehqldquizzesorreisahayakawakamiichinomiyagithubpreviewskrakowitdkontoguraswinoujscienceswissphinxn--45brj9chonanbunkyonanaoshimaringatlanbibaiduckdnsamparachutinglugsjcbnpparibashkiriasyno-dspjelkavikongsbergsynology-diskstationsynology-dspockongsvingertushungrytuvalle-daostaobaolbia-tempio-olbiatempioolbialowiezaganquangnamasteigenoamishirasatochigiftsrhtrogstadtuxfamilytuyenquangbinhthuantwmailvegasrlvelvetromsohuissier-justiceventurestaurantrustkanieruchomoscientistoripresspydebergvestfoldvestnesrvaomoriguchiharaffleentrycloudflare-ipfsortlandvestre-slidrecreationvestre-totennishiawakuravestvagoyvevelstadvfstreakusercontentroitskoninfernovecorealtorvibo-valentiavibovalentiavideovinhphuchoshichikashukudoyamakeupartysfjordrivelandrobakamaihd-stagingmbhartinnishinoshimattelemarkhangelskaruizawavinnicapitalonevinnytsiavipsinaapplockervirginankokubunjis-byklecznagatorokunohealth-carereformincommbankhakassiavirtual-uservecounterstrikevirtualservervirtualuserveexchangevisakuholeckobierzyceviterboliviajessheimperiavivianvivoryvixn--45q11chowdervlaanderennesoyvladikavkazimierz-dolnyvladimirvlogisticstreamlitapplcube-serversusakis-an-actorvmitourismartlabelingvolvologdanskontumintshowavolyngdalvoorlopervossevangenvotevotingvotoyotap-southeast-5vps-hostreaklinkstrippervusercontentrvaporcloudwiwatsukiyonotairesindevicenzaokinawashirosatochiokinoshimagazinewixsitewixstudio-fipstrynwjgorawkzwloclawekonyvelolipopmcdirwmcloudwmelhustudynamisches-dnsorumisugitomobegetmyipifony-2wmflabstuff-4-salewoodsidell-ogliastrapiapplinzis-certifiedworldworse-thanhphohochiminhadanorthflankatsuyamassa-carrara-massacarraramassabunzenwowithgoogleapiszwpdevcloudwpenginepoweredwphostedmailwpmucdn77-sslingwpmudevelopmentrysiljanewaywpsquaredwritesthisblogoiplumbingotpantheonsitewroclawsglobalacceleratorahimeshimakanegasakievennodebalancernwtcp4wtfastlylbarefootballooningjerdrumemergencyonabarumemorialivornobservereitatsunofficialolitapunkapsienamsskoganeindependent-panelombardiademfakefurniturealestatefarmerseinemrnotebooks-prodeomniwebthings3-object-lambdauthgear-stagingivestbyglandroverhallair-traffic-controllagdenesnaaseinet-freaks3-deprecatedgcagliarissadistgstagempresashibetsukuiitatebayashikaoirmembers3-eu-central-1kapp-ionosegawafaicloudineat-urlive-websitehimejibmdevinapps3-ap-southeast-1337wuozuerichardlillesandefjordwwwithyoutuberspacewzmiuwajimaxn--4it797koobindalxn--4pvxs4allxn--54b7fta0cchromediatechnologyeongbukarumaifmemsetkmaxxn--1ctwolominamatarpitksatmalluxenishiokoppegardrrxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49chungnamdalseidfjordtvsangotsukitahiroshimarcherkasykkylvenneslaskerrypropertiesanjotelulublindesnesannanishitosashimizunaminamidaitolgaularavellinodeobjectsannoheliohostrodawaraxn--5rtq34kooris-a-nascarfanxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264churchaselfipirangallupsunappgafanishiwakinuyamashinazawaxn--80aaa0cvacationstufftoread-booksnesoundcastreak-linkomvuxn--3pxu8khmelnitskiyamassivegridxn--80adxhksurnadalxn--80ao21axn--80aqecdr1axn--80asehdbarrell-of-knowledgesuite-stagingjesdalombardyn-vpndns3-us-gov-east-1xn--80aswgxn--80audnedalnxn--8dbq2axn--8ltr62kopervikhmelnytskyivalleeaostexn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisencoreapiacenzachpomorskiengiangxn--90a3academiamibubbleappspotagerxn--90aeroportsinfolkebibleasingrok-freeddnsfreebox-osascoli-picenogatachikawakayamadridvagsoyerxn--90aishobaraoxn--90amckinseyxn--90azhytomyradweblikes-piedmontuckerxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byameloyxn--asky-iraxn--aurskog-hland-jnbarsycenterprisecloudbeesusercontentattoolforgerockyonagunicloudiscordsays3-us-gov-west-1xn--avery-yuasakuragawaxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbarsyonlinequipmentaveusercontentawktoyonomurauthordalandroidienbienishiazaiiyamanouchikujolsterehabmereisenishigotembaixadavvesiidaknongivingjemnes3-eu-north-1xn--bck1b9a5dre4ciprianiigatairaumalatvuopmicrosoftbankasaokamikoaniikappudopaaskvollocaltonetlifyinvestmentsanokashibatakatsukiyosembokutamakiyosunndaluxuryxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2hosted-by-previderxn--bjarky-fyanagawaxn--bjddar-ptarumizusawaxn--blt-elabkhaziamallamaceiobbcircleaningmodelscapetownnews-stagingmxn--1lqs03nissandoyxn--bmlo-grafana-developerauniterois-coolblogdnshisuifuettertdasnetzxn--bod-2naturalxn--bozen-sdtirol-2obihirosakikamijimayfirstorjdevcloudjiffyxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagespeedmobilizeropslattumbriaxn--brum-voagatulaspeziaxn--btsfjord-9zaxn--bulsan-sdtirol-nsbasicserver-on-webpaaskimitsubatamicrolightingjovikaragandautoscanaryggeemrappui-productions3-eu-west-1xn--c1avgxn--c2br7gxn--c3s14mitoyoakexn--cck2b3basilicataniavocats3-eu-west-2xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-foundationxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-storagencymrulezajskiptveterinaireadthedocs-hostedogawarabikomaezakishimabarakawagoexn--czr694basketballfinanzlgkpmglassessments3-us-west-1xn--czrs0t0xn--czru2dxn--d1acj3batsfjordiscordsezpisdnipropetrovskygearapparasiteu-2xn--d1alfastvps-serverisignxn--d1atunesquaresinstagingxn--d5qv7z876ciscofreakadns-cloudflareglobalashovhachijoinvilleirfjorduponthewifidelitypeformesswithdnsantamariakexn--davvenjrga-y4axn--djrs72d6uyxn--djty4koryokamikawanehonbetsuwanouchikuhokuryugasakis-a-nursellsyourhomeftpinbrowsersafetymarketshiraois-a-landscaperspectakasugais-a-lawyerxn--dnna-graingerxn--drbak-wuaxn--dyry-iraxn--e1a4cistrondheimeteorappassenger-associationissayokoshibahikariyalibabacloudcsantoandrecifedexperts-comptablesanukinzais-a-bruinsfanissedalvivanovoldaxn--eckvdtc9dxn--efvn9surveysowaxn--efvy88hadselbuzentsujiiexn--ehqz56nxn--elqq16haebaruericssongdalenviknakatombetsumitakagildeskaliszxn--eveni-0qa01gaxn--f6qx53axn--fct429kosaigawaxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbcitadelhichisochimkentmpatriaxn--fiq64bauhauspostman-echofunatoriginstances3-us-west-2xn--fiqs8susonoxn--fiqz9suzakarpattiaaxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbentleyoriikarasjohkamikitayamatsurindependent-review-credentialless-staticblitzw-staticblitzxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grajewolterskluwerxn--frna-woaxn--frya-hraxn--fzc2c9e2citicaravanylvenetogakushimotoganexn--fzys8d69uvgmailxn--g2xx48civilaviationionjukujitawaravennaharimalborkdalxn--gckr3f0fauskedsmokorsetagayaseralingenovaraxn--gecrj9clancasterxn--ggaviika-8ya47hagakhanhhoabinhduongxn--gildeskl-g0axn--givuotna-8yanaizuxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-gonexn--gmqw5axn--gnstigbestellen-zvbentrendhostingleezeu-3xn--gnstigliefern-wobiraxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3evenesuzukanazawaxn--h2brj9c8cldmail-boxfuseljeducationporterxn--h3cuzk1dielddanuorris-into-animein-vigorlicexn--hbmer-xqaxn--hcesuolo-7ya35beppublic-inquiryoshiokanumazuryurihonjouwwebhoptokigawavoues3-eu-west-3xn--hebda8beskidyn-ip24xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-fleeklogesquare7xn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyandexcloudxn--io0a7is-into-carshitaramaxn--j1adpdnsupdaterxn--j1aefbsbxn--2m4a15exn--j1ael8bestbuyshoparenagareyamagentositenrikuzentakataharaholtalengerdalwaysdatabaseballangenkainanaejrietiengiangheannakadomarineen-rootaribeiraogakicks-assnasaarlandiscountry-snowplowiczeladzxn--j1amhagebostadxn--j6w193gxn--jlq480n2rgxn--jlster-byaotsurgeryxn--jrpeland-54axn--jvr189mittwaldserverxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--4dbgdty6choyodobashichinohealthcareersamsclubartowest1-usamsungminakamichikaiseiyoichipsandvikcoromantovalle-d-aostakinouexn--koluokta-7ya57haibarakitakamiizumisanofidonnakaniikawatanaguraxn--kprw13dxn--kpry57dxn--kput3is-into-cartoonshizukuishimojis-a-linux-useranishiaritabashikshacknetlibp2pimientaketomisatourshiranukamitondabayashiogamagoriziaxn--krager-gyasakaiminatoyotomiyazakis-into-gamessinaklodzkochikushinonsenasakuchinotsuchiurakawaxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfirmalselveruminisitexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugitlabbvieeexn--kvnangen-k0axn--l-1fairwindsuzukis-an-entertainerxn--l1accentureklamborghinikolaeventsvalbardunloppadoval-d-aosta-valleyxn--laheadju-7yasuokannamimatakatoris-leetrentinoalto-adigexn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52bhzc01xn--lesund-huaxn--lgbbat1ad8jejuxn--lgrd-poacctfcloudflareanycastcgroupowiat-band-campaignoredstonedre-eikerxn--lhppi-xqaxn--linds-pramericanexpresservegame-serverxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liaclerkstagentsaobernardovre-eikerxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesvchoseikarugalsacexn--mgb9awbfbx-oschokokekscholarshipschoolbusinessebytomaridagawarmiastapleschoolsztynsetranoyxn--mgba3a3ejtunkonsulatinowruzhgorodxn--mgba3a4f16axn--mgba3a4fra1-dellogliastraderxn--mgba7c0bbn0axn--mgbaam7a8haiduongxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00bialystokkeymachineu-4xn--mgbai9azgqp6jelasticbeanstalkhersonlanxesshizuokamogawaxn--mgbayh7gparaglidingxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexperimentsveioxn--mgbpl2fhskypecoris-localhostcertificationxn--mgbqly7c0a67fbclever-clouderavpagexn--mgbqly7cvafricapooguyxn--mgbt3dhdxn--mgbtf8fldrvareservdxn--mgbtx2bielawalbrzycharternopilawalesundiscourses3-website-ap-northeast-1xn--mgbx4cd0abogadobeaemcloud-ip-dynamica-west-1xn--mix082fbxoschulplattforminamimakis-a-catererxn--mix891fedjeepharmacienschulserverxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44cleverappsaogoncanva-appsaotomelbournexn--mkru45is-lostrolekamakurazakiwielunnerxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-not-axn--mosjen-eyatsukanoyaizuwakamatsubushikusakadogawaxn--mot-tlavangenxn--mre-og-romsdal-qqbuservebolturindalxn--msy-ula0haiphongolffanshimosuwalkis-a-designerxn--mtta-vrjjat-k7aflakstadotsurugimbiella-speziaxarnetbankanzakiyosatokorozawaustevollpagest-mon-blogueurovision-ranchernigovernmentdllivingitpagemprendeatnuh-ohtawaramotoineppueblockbusterniizaustrheimdbambinagisobetsucks3-ap-southeast-2xn--muost-0qaxn--mxtq1miuraxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4dbrk0cexn--nit225kosakaerodromegalloabatobamaceratabusebastopoleangaviikafjordxn--nmesjevuemie-tcbalsan-sudtirolkuszczytnord-fron-riopretodayxn--nnx388axn--nodeloittexn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsicilyxn--o3cw4hair-surveillancexn--o3cyx2axn--od0algardxn--od0aq3bielskoczoweddinglitcheap-south-2xn--ogbpf8flekkefjordxn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--otu796dxn--p1acfolksvelvikonskowolayangroupippugliaxn--p1ais-not-certifiedxn--pgbs0dhakatanortonkotsumomodenakatsugawaxn--porsgu-sta26fedorainfracloudfunctionschwarzgwesteuropencraftransfer-webappharmacyou2-localplayerxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4clickrisinglesjaguarvodkagaminombrendlyngenebakkeshibukawakeliwebhostingouv0xn--qcka1pmcprequalifymeinforumzxn--qqqt11miyazure-mobilevangerxn--qxa6axn--qxamiyotamanoxn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-savedxn--rennesy-v1axn--rhkkervju-01afedorapeopleikangerxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturbruksgymnxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawaraxn--rny31hakodatexn--rovu88bieszczadygeyachimataijinderoyusuharazurefdietateshinanomachintaifun-dnsaliases121xn--rros-granvindafjordxn--rskog-uuaxn--rst-0navigationxn--rsta-framercanvasvn-repospeedpartnerxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawatahamaxn--s-1faitheshopwarezzoxn--s9brj9clientoyotsukaidownloadurbanamexnetfylkesbiblackbaudcdn-edgestackhero-networkinggroupperxn--sandnessjen-ogbizxn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphicswidnicaobangxn--skierv-utazurecontainerimamateramombetsupplieswidnikitagatamayukuhashimokitayamaxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navoizumizakis-slickharkivallee-aosteroyxn--slt-elabievathletajimabaria-vungtaudiopsys3-website-ap-southeast-1xn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbifukagawalmartaxiijimarugame-hostrowieconomiasagaeroclubmedecin-berlindasdaeguambulancechireadmyblogsytecnologiazurestaticappspaceusercontentproxy9guacuiababia-goraclecloudappschaefflereggiocalabriaurland-4-salernooreggioemiliaromagnarusawaurskog-holandinggff5xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbigv-infolldalomoldegreeu-central-2xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bookkeepermashikexn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbiharvanedgeappengineu-south-1xn--stre-toten-zcbihoronobeokayamagasakikuchikuseihicampinashikiminohostfoldiscoverbaniazurewebsitests3-external-1xn--t60b56axn--tckwebview-assetswiebodzindependent-commissionxn--tiq49xqyjelenia-goraxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbikedaejeonbuk0emmafann-arborlandd-dnsfor-better-thanhhoarairkitapps-audiblebesbyencowayokosukanraetnaamesjevuemielnogiehtavuoatnabudejjuniper2-ddnss3-123minsidaarborteamsterdamnserverseating-organicbcg123homepagexl-o-g-i-navyokote123hjemmesidealerdalaheadjuegoshikibichuo0o0g0xn--trentin-sdtirol-7vbiomutazas3-website-ap-southeast-2xn--trentino-sd-tirol-c3birkenesoddtangentapps3-website-eu-west-1xn--trentino-sdtirol-szbittermezproxyusuitatamotors3-website-sa-east-1xn--trentinosd-tirol-rzbjarkoyuullensvanguardisharparisor-fronishiharaxn--trentinosdtirol-7vbjerkreimmobilieniwaizumiotsukumiyamazonaws-cloud9xn--trentinsd-tirol-6vbjugnieznorddalomzaporizhzhiaxn--trentinsdtirol-nsblackfridaynightayninhaccalvinklein-butterepairbusanagochigasakindigenakayamarumorimachidaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvarggatromsakegawaxn--uc0ay4axn--uist22hakonexn--uisz3gxn--unjrga-rtashkenturystykanmakiyokawaraxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtuscanyxn--valle-d-aoste-ehboehringerikerxn--valleaoste-e7axn--valledaoste-ebbvaapstempurlxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbloombergentingliwiceu-south-2xn--vestvgy-ixa6oxn--vg-yiablushangrilaakesvuemieleccevervaultgoryuzawaxn--vgan-qoaxn--vgsy-qoa0j0xn--vgu402clinicarbonia-iglesias-carboniaiglesiascarboniaxn--vhquvaroyxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bmoattachments3-website-us-east-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cliniquenoharaxn--wgbl6axn--xhq521bms3-website-us-gov-west-1xn--xkc2al3hye2axn--xkc2dl3a5ee0hakubaclieu-1xn--y9a3aquarelleborkangerxn--yer-znavuotnarashinoharaxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4gbriminiserverxn--ystre-slidre-ujbmwcloudnonproddaemongolianishiizunazukindustriaxn--zbx025dxn--zf0avxn--4it168dxn--zfr164bnrweatherchannelsdvrdns3-website-us-west-1xnbayernxz \ No newline at end of file diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 000000000000..047cb30eb151 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,205 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// https://publicsuffix.org/ +// +// A public suffix is one under which Internet users can directly register +// names. It is related to, but different from, a TLD (top level domain). +// +// "com" is a TLD (top level domain). Top level means it has no dots. +// +// "com" is also a public suffix. Amazon and Google have registered different +// siblings under that domain: "amazon.com" and "google.com". +// +// "au" is another TLD, again because it has no dots. But it's not "amazon.au". +// Instead, it's "amazon.com.au". +// +// "com.au" isn't an actual TLD, because it's not at the top level (it has +// dots). But it is an eTLD (effective TLD), because that's the branching point +// for domain name registrars. +// +// Another name for "an eTLD" is "a public suffix". Often, what's more of +// interest is the eTLD+1, or one more label than the public suffix. For +// example, browsers partition read/write access to HTTP cookies according to +// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from +// "google.com.au", but web pages served from "maps.google.com" can share +// cookies from "www.google.com", so you don't have to sign into Google Maps +// separately from signing into Google Web Search. Note that all four of those +// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, +// the last two are not (but share the same eTLD+1: "google.com"). +// +// All of these domains have the same eTLD+1: +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// +// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". +// +// There is no closed form algorithm to calculate the eTLD of a domain. +// Instead, the calculation is data driven. This package provides a +// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at +// https://publicsuffix.org/ +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is either a +// privately managed domain (and in practice, not a top level domain) or an +// unmanaged top level domain (and not explicitly mentioned in the +// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN +// domains, "foo.dyndns.org" is a private domain and +// "cromulent" is an unmanaged top level domain. +// +// Use cases for distinguishing ICANN domains like "foo.com" from private +// domains like "foo.appspot.com" can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, icannNode, wildcard := domain, len(domain), false, false +loop: + for { + dot := strings.LastIndexByte(s, '.') + if wildcard { + icann = icannNode + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength)) + icannNode = u&(1<>= nodesBitsICANN + u = children.get(u & (1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1<